Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpica/acconfig.h2
-rw-r--r--drivers/acpi/apei/Kconfig1
-rw-r--r--drivers/acpi/apei/apei-base.c2
-rw-r--r--drivers/ata/Kconfig9
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/pata_at91.c2
-rw-r--r--drivers/ata/pata_imx.c253
-rw-r--r--drivers/ata/pata_via.c18
-rw-r--r--drivers/ata/sata_dwc_460ex.c14
-rw-r--r--drivers/ata/sata_sil.c2
-rw-r--r--drivers/base/devres.c1
-rw-r--r--drivers/base/devtmpfs.c2
-rw-r--r--drivers/base/firmware_class.c11
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/base/power/clock_ops.c111
-rw-r--r--drivers/base/power/domain.c30
-rw-r--r--drivers/base/regmap/regmap-i2c.c1
-rw-r--r--drivers/base/regmap/regmap-spi.c3
-rw-r--r--drivers/base/regmap/regmap.c7
-rw-r--r--drivers/bcma/main.c12
-rw-r--r--drivers/block/Kconfig17
-rw-r--r--drivers/block/drbd/drbd_nl.c4
-rw-r--r--drivers/block/floppy.c8
-rw-r--r--drivers/block/loop.c297
-rw-r--r--drivers/block/swim3.c1
-rw-r--r--drivers/block/xen-blkback/common.h2
-rw-r--r--drivers/block/xen-blkback/xenbus.c6
-rw-r--r--drivers/block/xen-blkfront.c6
-rw-r--r--drivers/bluetooth/ath3k.c1
-rw-r--r--drivers/bluetooth/btusb.c19
-rw-r--r--drivers/bluetooth/btwilink.c16
-rw-r--r--drivers/cdrom/cdrom.c8
-rw-r--r--drivers/char/msm_smd_pkt.c5
-rw-r--r--drivers/char/tpm/Kconfig1
-rw-r--r--drivers/char/tpm/tpm.c9
-rw-r--r--drivers/char/tpm/tpm_nsc.c2
-rw-r--r--drivers/clocksource/sh_cmt.c34
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c3
-rw-r--r--drivers/dma/amba-pl08x.c1
-rw-r--r--drivers/dma/ste_dma40.c42
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/firewire/core-cdev.c24
-rw-r--r--drivers/firewire/core-device.c15
-rw-r--r--drivers/firewire/ohci.c12
-rw-r--r--drivers/firewire/sbp2.c4
-rw-r--r--drivers/firmware/google/gsmi.c2
-rw-r--r--drivers/gpio/Kconfig9
-rw-r--r--drivers/gpio/Makefile7
-rw-r--r--drivers/gpio/gpio-davinci.c455
-rw-r--r--drivers/gpio/gpio-ep93xx.c22
-rw-r--r--drivers/gpio/gpio-generic.c15
-rw-r--r--drivers/gpio/gpio-ks8695.c319
-rw-r--r--drivers/gpio/gpio-lpc32xx.c446
-rw-r--r--drivers/gpio/gpio-nomadik.c3
-rw-r--r--drivers/gpio/gpio-omap.c2
-rw-r--r--drivers/gpio/gpio-pxa.c338
-rw-r--r--drivers/gpio/gpio-sa1100.c63
-rw-r--r--drivers/gpio/gpio-tegra.c23
-rw-r--r--drivers/gpio/gpio-tnetv107x.c205
-rw-r--r--drivers/gpio/gpio-u300.c1190
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h4
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h17
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c213
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c7
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h6
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c82
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c1
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c72
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c88
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c12
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c28
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c88
-rw-r--r--drivers/gpu/drm/radeon/ni.c48
-rw-r--r--drivers/gpu/drm/radeon/r100.c22
-rw-r--r--drivers/gpu/drm/radeon/r200.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c80
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c40
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c30
-rw-r--r--drivers/gpu/drm/radeon/rv770.c51
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c15
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c2
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-magicmouse.c66
-rw-r--r--drivers/hid/hid-wacom.c24
-rw-r--r--drivers/hid/hid-wiimote.c277
-rw-r--r--drivers/hid/usbhid/hid-quirks.c2
-rw-r--r--drivers/hwmon/coretemp.c216
-rw-r--r--drivers/hwmon/ds620.c2
-rw-r--r--drivers/hwmon/i5k_amb.c42
-rw-r--r--drivers/hwmon/ibmaem.c15
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/ntc_thermistor.c3
-rw-r--r--drivers/hwmon/pmbus/lm25066.c12
-rw-r--r--drivers/hwmon/pmbus/pmbus.h1
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c29
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c6
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c6
-rw-r--r--drivers/hwmon/w83791d.c4
-rw-r--r--drivers/i2c/busses/i2c-ixp2000.c2
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c9
-rw-r--r--drivers/i2c/busses/i2c-omap.c29
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c5
-rw-r--r--drivers/i2c/busses/i2c-tegra.c60
-rw-r--r--drivers/ide/at91_ide.c2
-rw-r--r--drivers/ide/ide-disk.c7
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c8
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c10
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c2
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/keyboard/adp5588-keys.c1
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c1
-rw-r--r--drivers/input/keyboard/omap-keypad.c2
-rw-r--r--drivers/input/keyboard/tegra-kbc.c2
-rw-r--r--drivers/input/misc/ad714x-i2c.c81
-rw-r--r--drivers/input/misc/ad714x-spi.c68
-rw-r--r--drivers/input/misc/ad714x.c116
-rw-r--r--drivers/input/misc/ad714x.h35
-rw-r--r--drivers/input/misc/cm109.c2
-rw-r--r--drivers/input/misc/mma8450.c2
-rw-r--r--drivers/input/misc/mpu3050.c2
-rw-r--r--drivers/input/mouse/bcm5974.c60
-rw-r--r--drivers/input/tablet/wacom_sys.c31
-rw-r--r--drivers/input/tablet/wacom_wac.c49
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c12
-rw-r--r--drivers/input/touchscreen/max11801_ts.c3
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c1
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c2
-rw-r--r--drivers/iommu/amd_iommu.c18
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/leds/leds-ams-delta.c1
-rw-r--r--drivers/leds/leds-bd2802.c5
-rw-r--r--drivers/leds/leds-hp6xx.c1
-rw-r--r--drivers/leds/ledtrig-timer.c2
-rw-r--r--drivers/md/linear.h2
-rw-r--r--drivers/md/md.c28
-rw-r--r--drivers/md/raid1.c14
-rw-r--r--drivers/md/raid10.c47
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c26
-rw-r--r--drivers/media/rc/nuvoton-cir.c45
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/video/gspca/ov519.c22
-rw-r--r--drivers/media/video/gspca/sonixj.c6
-rw-r--r--drivers/media/video/omap/omap_vout.c13
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c1
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c2
-rw-r--r--drivers/media/video/uvc/uvc_driver.c2
-rw-r--r--drivers/media/video/uvc/uvc_entity.c2
-rw-r--r--drivers/media/video/uvc/uvc_video.c10
-rw-r--r--drivers/media/video/uvc/uvcvideo.h2
-rw-r--r--drivers/media/video/v4l2-dev.c11
-rw-r--r--drivers/media/video/v4l2-device.c2
-rw-r--r--drivers/media/video/via-camera.c2
-rw-r--r--drivers/mfd/jz4740-adc.c2
-rw-r--r--drivers/mfd/max8997.c5
-rw-r--r--drivers/mfd/menelaus.c2
-rw-r--r--drivers/mfd/omap-usb-host.c2
-rw-r--r--drivers/mfd/tps65910-irq.c2
-rw-r--r--drivers/mfd/twl4030-madc.c5
-rw-r--r--drivers/mfd/wm8350-gpio.c4
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/ab8500-pwm.c2
-rw-r--r--drivers/misc/cb710/core.c3
-rw-r--r--drivers/misc/fsa9480.c4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c14
-rw-r--r--drivers/misc/pti.c14
-rw-r--r--drivers/misc/ti-st/st_core.c10
-rw-r--r--drivers/misc/ti-st/st_kim.c33
-rw-r--r--drivers/misc/ti-st/st_ll.c19
-rw-r--r--drivers/mmc/card/block.c3
-rw-r--r--drivers/mmc/card/mmc_test.c58
-rw-r--r--drivers/mmc/core/core.c37
-rw-r--r--drivers/mmc/core/host.c12
-rw-r--r--drivers/mmc/core/host.h8
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/mmc_ops.c2
-rw-r--r--drivers/mmc/core/sd.c81
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/omap.c2
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c41
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c3
-rw-r--r--drivers/mmc/host/sdhci-s3c.c6
-rw-r--r--drivers/mmc/host/sdhci-tegra.c4
-rw-r--r--drivers/mmc/host/sdhci.c53
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c4
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mtd/nand/ams-delta.c2
-rw-r--r--drivers/mtd/onenand/omap2.c2
-rw-r--r--drivers/mtd/ubi/debug.h2
-rw-r--r--drivers/net/Kconfig11
-rw-r--r--drivers/net/arm/am79c961a.c3
-rw-r--r--drivers/net/arm/at91_ether.c2
-rw-r--r--drivers/net/bnx2x/bnx2x.h124
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c62
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c3
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c48
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c46
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c187
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h45
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c7
-rw-r--r--drivers/net/bonding/bond_3ad.c3
-rw-r--r--drivers/net/bonding/bond_alb.c3
-rw-r--r--drivers/net/bonding/bond_main.c31
-rw-r--r--drivers/net/can/sja1000/plx_pci.c4
-rw-r--r--drivers/net/can/slcan.c2
-rw-r--r--drivers/net/can/ti_hecc.c4
-rw-r--r--drivers/net/cassini.c3
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c23
-rw-r--r--drivers/net/cxgb3/l2t.c15
-rw-r--r--drivers/net/cxgb3/l2t.h16
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c3
-rw-r--r--drivers/net/e1000/e1000_hw.c6
-rw-r--r--drivers/net/e1000e/82571.c6
-rw-r--r--drivers/net/e1000e/e1000.h5
-rw-r--r--drivers/net/e1000e/ethtool.c3
-rw-r--r--drivers/net/e1000e/ich8lan.c65
-rw-r--r--drivers/net/e1000e/lib.c7
-rw-r--r--drivers/net/e1000e/netdev.c89
-rw-r--r--drivers/net/forcedeth.c3
-rw-r--r--drivers/net/gianfar.c9
-rw-r--r--drivers/net/gianfar_ethtool.c34
-rw-r--r--drivers/net/gianfar_ptp.c9
-rw-r--r--drivers/net/greth.c12
-rw-r--r--drivers/net/greth.h1
-rw-r--r--drivers/net/ibmveth.c64
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c4
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c8
-rw-r--r--drivers/net/netconsole.c8
-rw-r--r--drivers/net/pch_gbe/pch_gbe.h12
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c346
-rw-r--r--drivers/net/pcnet32.c2
-rw-r--r--drivers/net/phy/dp83640.c9
-rw-r--r--drivers/net/phy/national.c17
-rw-r--r--drivers/net/ppp_generic.c7
-rw-r--r--drivers/net/pxa168_eth.c1
-rw-r--r--drivers/net/r8169.c32
-rw-r--r--drivers/net/rionet.c23
-rw-r--r--drivers/net/sfc/efx.c18
-rw-r--r--drivers/net/sfc/io.h6
-rw-r--r--drivers/net/sfc/mcdi.c46
-rw-r--r--drivers/net/sfc/nic.c7
-rw-r--r--drivers/net/sfc/nic.h2
-rw-r--r--drivers/net/sfc/siena.c25
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sh_eth.c2
-rw-r--r--drivers/net/slip.c2
-rw-r--r--drivers/net/tg3.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c47
-rw-r--r--drivers/net/usb/ipheth.c5
-rw-r--r--drivers/net/usb/rtl8150.c1
-rw-r--r--drivers/net/via-velocity.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c34
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c10
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c4
-rw-r--r--drivers/net/wireless/b43/dma.c20
-rw-r--r--drivers/net/wireless/b43/main.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c21
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c39
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c13
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c8
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c47
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c22
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c31
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c1
-rw-r--r--drivers/net/wireless/rtlwifi/core.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c11
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c1
-rw-r--r--drivers/net/wireless/wl1251/acx.c6
-rw-r--r--drivers/net/wireless/wl1251/cmd.c2
-rw-r--r--drivers/net/wireless/wl12xx/acx.c6
-rw-r--r--drivers/net/wireless/wl12xx/main.c1
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c2
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c50
-rw-r--r--drivers/net/xen-netback/interface.c4
-rw-r--r--drivers/pci/hotplug/pcihp_slot.c47
-rw-r--r--drivers/pci/of.c2
-rw-r--r--drivers/pci/pci.c71
-rw-r--r--drivers/pci/pci.h4
-rw-r--r--drivers/pci/probe.c159
-rw-r--r--drivers/pci/setup-bus.c166
-rw-r--r--drivers/pci/setup-res.c152
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c2
-rw-r--r--drivers/power/max8997_charger.c1
-rw-r--r--drivers/power/max8998_charger.c1
-rw-r--r--drivers/power/s3c_adc_battery.c1
-rw-r--r--drivers/rapidio/rio-scan.c3
-rw-r--r--drivers/rtc/interface.c4
-rw-r--r--drivers/rtc/rtc-ep93xx.c16
-rw-r--r--drivers/rtc/rtc-imxdi.c1
-rw-r--r--drivers/rtc/rtc-lib.c2
-rw-r--r--drivers/rtc/rtc-s3c.c105
-rw-r--r--drivers/rtc/rtc-twl.c60
-rw-r--r--drivers/s390/block/dasd_ioctl.c10
-rw-r--r--drivers/s390/char/sclp_cmd.c6
-rw-r--r--drivers/s390/cio/cio.c8
-rw-r--r--drivers/scsi/3w-9xxx.c2
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c13
-rw-r--r--drivers/scsi/hpsa.c57
-rw-r--r--drivers/scsi/isci/host.c13
-rw-r--r--drivers/scsi/isci/host.h3
-rw-r--r--drivers/scsi/isci/init.c47
-rw-r--r--drivers/scsi/isci/phy.c13
-rw-r--r--drivers/scsi/isci/registers.h12
-rw-r--r--drivers/scsi/isci/request.c30
-rw-r--r--drivers/scsi/isci/unsolicited_frame_control.c2
-rw-r--r--drivers/scsi/isci/unsolicited_frame_control.h2
-rw-r--r--drivers/scsi/libfc/fc_exch.c59
-rw-r--r--drivers/scsi/libfc/fc_fcp.c11
-rw-r--r--drivers/scsi/libfc/fc_lport.c11
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h29
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c282
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c109
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c25
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c30
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/Kconfig2
-rw-r--r--drivers/sh/intc/chip.c3
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-fsl-spi.c3
-rw-r--r--drivers/spi/spi-imx.c4
-rw-r--r--drivers/spi/spi-topcliff-pch.c93
-rw-r--r--drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c2
-rw-r--r--drivers/staging/brcm80211/brcmsmac/otp.c1
-rw-r--r--drivers/staging/brcm80211/brcmsmac/types.h1
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c4
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.c1
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c1
-rw-r--r--drivers/staging/gma500/gem_glue.c23
-rw-r--r--drivers/staging/gma500/gem_glue.h2
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.c3
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.h3
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dpi.c7
-rw-r--r--drivers/staging/gma500/mdfld_dsi_output.c4
-rw-r--r--drivers/staging/gma500/medfield.h2
-rw-r--r--drivers/staging/gma500/psb_drv.h1
-rw-r--r--drivers/staging/hv/blkvsc_drv.c4
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c8
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c8
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c8
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c8
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c8
-rw-r--r--drivers/staging/nvec/TODO6
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c1
-rw-r--r--drivers/staging/octeon/ethernet-spi.c1
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c9
-rw-r--r--drivers/staging/rts_pstor/rtsx.c109
-rw-r--r--drivers/staging/rts_pstor/rtsx.h9
-rw-r--r--drivers/staging/solo6x10/core.c1
-rw-r--r--drivers/staging/solo6x10/enc.c1
-rw-r--r--drivers/staging/solo6x10/g723.c1
-rw-r--r--drivers/staging/solo6x10/p2m.c1
-rw-r--r--drivers/staging/solo6x10/solo6x10.h1
-rw-r--r--drivers/staging/speakup/devsynth.c5
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c1
-rw-r--r--drivers/staging/zcache/Makefile2
-rw-r--r--drivers/staging/zcache/tmem.c2
-rw-r--r--drivers/staging/zcache/zcache-main.c (renamed from drivers/staging/zcache/zcache.c)23
-rw-r--r--drivers/target/iscsi/iscsi_target.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c16
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c45
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c274
-rw-r--r--drivers/target/target_core_cdb.c92
-rw-r--r--drivers/target/target_core_device.c48
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_pr.c8
-rw-r--r--drivers/target/target_core_rd.c24
-rw-r--r--drivers/target/target_core_tpg.c64
-rw-r--r--drivers/target/target_core_transport.c215
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h12
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c90
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c13
-rw-r--r--drivers/target/tcm_fc/tfc_io.c62
-rw-r--r--drivers/tty/pty.c17
-rw-r--r--drivers/tty/serial/8250.c8
-rw-r--r--drivers/tty/serial/8250_pci.c11
-rw-r--r--drivers/tty/serial/8250_pnp.c3
-rw-r--r--drivers/tty/serial/atmel_serial.c10
-rw-r--r--drivers/tty/serial/crisv10.c4
-rw-r--r--drivers/tty/serial/max3107-aava.c2
-rw-r--r--drivers/tty/serial/max3107.c2
-rw-r--r--drivers/tty/serial/mrst_max3110.c2
-rw-r--r--drivers/tty/serial/omap-serial.c3
-rw-r--r--drivers/tty/serial/pch_uart.c3
-rw-r--r--drivers/tty/serial/samsung.c8
-rw-r--r--drivers/tty/serial/serial_core.c5
-rw-r--r--drivers/tty/serial/sh-sci.c72
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/tty_io.c3
-rw-r--r--drivers/usb/class/usbtmc.c2
-rw-r--r--drivers/usb/core/config.c11
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/at91_udc.c1
-rw-r--r--drivers/usb/gadget/composite.c6
-rw-r--r--drivers/usb/gadget/f_hid.c7
-rw-r--r--drivers/usb/gadget/f_phonet.c1
-rw-r--r--drivers/usb/gadget/fusb300_udc.c101
-rw-r--r--drivers/usb/gadget/net2272.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c1
-rw-r--r--drivers/usb/host/ehci-hub.c19
-rw-r--r--drivers/usb/host/ehci-mxc.c1
-rw-r--r--drivers/usb/host/ehci-omap.c16
-rw-r--r--drivers/usb/host/ehci-s5p.c1
-rw-r--r--drivers/usb/host/isp1760-hcd.c3
-rw-r--r--drivers/usb/host/ohci-pnx4008.c2
-rw-r--r--drivers/usb/host/pci-quirks.c4
-rw-r--r--drivers/usb/host/xhci-hub.c19
-rw-r--r--drivers/usb/host/xhci-ring.c109
-rw-r--r--drivers/usb/host/xhci.c47
-rw-r--r--drivers/usb/musb/Kconfig3
-rw-r--r--drivers/usb/musb/blackfin.c1
-rw-r--r--drivers/usb/musb/cppi_dma.c26
-rw-r--r--drivers/usb/musb/davinci.c2
-rw-r--r--drivers/usb/musb/musb_core.h12
-rw-r--r--drivers/usb/musb/musb_gadget.c9
-rw-r--r--drivers/usb/musb/musb_regs.h6
-rw-r--r--drivers/usb/musb/tusb6010.c1
-rw-r--r--drivers/usb/musb/tusb6010_omap.c3
-rw-r--r--drivers/usb/musb/ux500_dma.c38
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c28
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/option.c116
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h10
-rw-r--r--drivers/video/atmel_lcdfb.c2
-rw-r--r--drivers/video/backlight/adp8870_bl.c2
-rw-r--r--drivers/video/backlight/backlight.c2
-rw-r--r--drivers/video/backlight/ep93xx_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c9
-rw-r--r--drivers/video/omap/lcd_apollon.c2
-rw-r--r--drivers/video/omap/lcd_h3.c2
-rw-r--r--drivers/video/omap/lcd_inn1610.c2
-rw-r--r--drivers/video/omap/lcd_ldp.c2
-rw-r--r--drivers/video/omap/lcd_osk.c2
-rw-r--r--drivers/video/omap/lcd_overo.c2
-rw-r--r--drivers/video/omap/lcd_palmtt.c2
-rw-r--r--drivers/video/pnx4008/sdum.c2
-rw-r--r--drivers/w1/masters/ds2490.c4
-rw-r--r--drivers/w1/masters/matrox_w1.c4
-rw-r--r--drivers/w1/slaves/w1_ds2408.c2
-rw-r--r--drivers/w1/slaves/w1_smem.c4
-rw-r--r--drivers/w1/slaves/w1_therm.c4
-rw-r--r--drivers/w1/w1.c4
-rw-r--r--drivers/w1/w1.h2
-rw-r--r--drivers/w1/w1_family.c2
-rw-r--r--drivers/w1/w1_family.h2
-rw-r--r--drivers/w1/w1_int.c2
-rw-r--r--drivers/w1/w1_int.h2
-rw-r--r--drivers/w1/w1_io.c2
-rw-r--r--drivers/w1/w1_log.h2
-rw-r--r--drivers/w1/w1_netlink.c2
-rw-r--r--drivers/w1/w1_netlink.h2
-rw-r--r--drivers/watchdog/hpwdt.c9
-rw-r--r--drivers/watchdog/lantiq_wdt.c8
-rw-r--r--drivers/watchdog/sbc_epx_c3.c2
-rw-r--r--drivers/watchdog/watchdog_dev.c14
-rw-r--r--drivers/xen/events.c40
-rw-r--r--drivers/xen/xen-selfballoon.c1
-rw-r--r--drivers/zorro/zorro.c7
517 files changed, 8960 insertions(+), 4307 deletions(-)
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index bc533dde16c4..f895a244ca7e 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -121,7 +121,7 @@
121 121
122/* Maximum sleep allowed via Sleep() operator */ 122/* Maximum sleep allowed via Sleep() operator */
123 123
124#define ACPI_MAX_SLEEP 20000 /* Two seconds */ 124#define ACPI_MAX_SLEEP 2000 /* Two seconds */
125 125
126/****************************************************************************** 126/******************************************************************************
127 * 127 *
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index c34aa51af4ee..e3f47872ec22 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -13,6 +13,7 @@ config ACPI_APEI_GHES
13 bool "APEI Generic Hardware Error Source" 13 bool "APEI Generic Hardware Error Source"
14 depends on ACPI_APEI && X86 14 depends on ACPI_APEI && X86
15 select ACPI_HED 15 select ACPI_HED
16 select IRQ_WORK
16 select LLIST 17 select LLIST
17 select GENERIC_ALLOCATOR 18 select GENERIC_ALLOCATOR
18 help 19 help
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 8041248fce9b..61540360d5ce 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -618,7 +618,7 @@ int apei_osc_setup(void)
618 }; 618 };
619 619
620 capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; 620 capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
621 capbuf[OSC_SUPPORT_TYPE] = 0; 621 capbuf[OSC_SUPPORT_TYPE] = 1;
622 capbuf[OSC_CONTROL_TYPE] = 0; 622 capbuf[OSC_CONTROL_TYPE] = 0;
623 623
624 if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)) 624 if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index ca3e6be44a04..5987e0ba8c2d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -468,6 +468,15 @@ config PATA_ICSIDE
468 interface card. This is not required for ICS partition support. 468 interface card. This is not required for ICS partition support.
469 If you are unsure, say N to this. 469 If you are unsure, say N to this.
470 470
471config PATA_IMX
472 tristate "PATA support for Freescale iMX"
473 depends on ARCH_MXC
474 help
475 This option enables support for the PATA host available on Freescale
476 iMX SoCs.
477
478 If unsure, say N.
479
471config PATA_IT8213 480config PATA_IT8213
472 tristate "IT8213 PATA support (Experimental)" 481 tristate "IT8213 PATA support (Experimental)"
473 depends on PCI && EXPERIMENTAL 482 depends on PCI && EXPERIMENTAL
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8ac64e1aa051..9550d691fd19 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
48obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o 48obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
49obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o 49obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
50obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o 50obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
51obj-$(CONFIG_PATA_IMX) += pata_imx.o
51obj-$(CONFIG_PATA_IT8213) += pata_it8213.o 52obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
52obj-$(CONFIG_PATA_IT821X) += pata_it821x.o 53obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
53obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o 54obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 960c72571395..3f4c2613b3df 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -30,7 +30,7 @@
30 30
31#include <mach/at91sam9_smc.h> 31#include <mach/at91sam9_smc.h>
32#include <mach/board.h> 32#include <mach/board.h>
33#include <mach/gpio.h> 33#include <asm/gpio.h>
34 34
35#define DRV_NAME "pata_at91" 35#define DRV_NAME "pata_at91"
36#define DRV_VERSION "0.3" 36#define DRV_VERSION "0.3"
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
new file mode 100644
index 000000000000..ca9d9caedfa3
--- /dev/null
+++ b/drivers/ata/pata_imx.c
@@ -0,0 +1,253 @@
1/*
2 * Freescale iMX PATA driver
3 *
4 * Copyright (C) 2011 Arnaud Patard <arnaud.patard@rtp-net.org>
5 *
6 * Based on pata_platform - Copyright (C) 2006 - 2007 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 *
12 * TODO:
13 * - dmaengine support
14 * - check if timing stuff needed
15 */
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/blkdev.h>
20#include <scsi/scsi_host.h>
21#include <linux/ata.h>
22#include <linux/libata.h>
23#include <linux/platform_device.h>
24#include <linux/clk.h>
25
26#define DRV_NAME "pata_imx"
27
28#define PATA_IMX_ATA_CONTROL 0x24
29#define PATA_IMX_ATA_CTRL_FIFO_RST_B (1<<7)
30#define PATA_IMX_ATA_CTRL_ATA_RST_B (1<<6)
31#define PATA_IMX_ATA_CTRL_IORDY_EN (1<<0)
32#define PATA_IMX_ATA_INT_EN 0x2C
33#define PATA_IMX_ATA_INTR_ATA_INTRQ2 (1<<3)
34#define PATA_IMX_DRIVE_DATA 0xA0
35#define PATA_IMX_DRIVE_CONTROL 0xD8
36
37struct pata_imx_priv {
38 struct clk *clk;
39 /* timings/interrupt/control regs */
40 u8 *host_regs;
41 u32 ata_ctl;
42};
43
44static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
45{
46 struct ata_device *dev;
47 struct ata_port *ap = link->ap;
48 struct pata_imx_priv *priv = ap->host->private_data;
49 u32 val;
50
51 ata_for_each_dev(dev, link, ENABLED) {
52 dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
53 dev->xfer_shift = ATA_SHIFT_PIO;
54 dev->flags |= ATA_DFLAG_PIO;
55
56 val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
57 if (ata_pio_need_iordy(dev))
58 val |= PATA_IMX_ATA_CTRL_IORDY_EN;
59 else
60 val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
61 __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
62
63 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
64 }
65 return 0;
66}
67
68static struct scsi_host_template pata_imx_sht = {
69 ATA_PIO_SHT(DRV_NAME),
70};
71
72static struct ata_port_operations pata_imx_port_ops = {
73 .inherits = &ata_sff_port_ops,
74 .sff_data_xfer = ata_sff_data_xfer_noirq,
75 .cable_detect = ata_cable_unknown,
76 .set_mode = pata_imx_set_mode,
77};
78
79static void pata_imx_setup_port(struct ata_ioports *ioaddr)
80{
81 /* Fixup the port shift for platforms that need it */
82 ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2);
83 ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2);
84 ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
85 ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2);
86 ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2);
87 ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2);
88 ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2);
89 ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2);
90 ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2);
91 ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
92}
93
94static int __devinit pata_imx_probe(struct platform_device *pdev)
95{
96 struct ata_host *host;
97 struct ata_port *ap;
98 struct pata_imx_priv *priv;
99 int irq = 0;
100 struct resource *io_res;
101
102 io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
103 if (io_res == NULL)
104 return -EINVAL;
105
106 irq = platform_get_irq(pdev, 0);
107 if (irq <= 0)
108 return -EINVAL;
109
110 priv = devm_kzalloc(&pdev->dev,
111 sizeof(struct pata_imx_priv), GFP_KERNEL);
112 if (!priv)
113 return -ENOMEM;
114
115 priv->clk = clk_get(&pdev->dev, NULL);
116 if (IS_ERR(priv->clk)) {
117 dev_err(&pdev->dev, "Failed to get clock\n");
118 return PTR_ERR(priv->clk);
119 }
120
121 clk_enable(priv->clk);
122
123 host = ata_host_alloc(&pdev->dev, 1);
124 if (!host)
125 goto free_priv;
126
127 host->private_data = priv;
128 ap = host->ports[0];
129
130 ap->ops = &pata_imx_port_ops;
131 ap->pio_mask = ATA_PIO0;
132 ap->flags |= ATA_FLAG_SLAVE_POSS;
133
134 priv->host_regs = devm_ioremap(&pdev->dev, io_res->start,
135 resource_size(io_res));
136 if (!priv->host_regs) {
137 dev_err(&pdev->dev, "failed to map IO/CTL base\n");
138 goto free_priv;
139 }
140
141 ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA;
142 ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL;
143
144 ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
145
146 pata_imx_setup_port(&ap->ioaddr);
147
148 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
149 (unsigned long long)io_res->start + PATA_IMX_DRIVE_DATA,
150 (unsigned long long)io_res->start + PATA_IMX_DRIVE_CONTROL);
151
152 /* deassert resets */
153 __raw_writel(PATA_IMX_ATA_CTRL_FIFO_RST_B |
154 PATA_IMX_ATA_CTRL_ATA_RST_B,
155 priv->host_regs + PATA_IMX_ATA_CONTROL);
156 /* enable interrupts */
157 __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
158 priv->host_regs + PATA_IMX_ATA_INT_EN);
159
160 /* activate */
161 return ata_host_activate(host, irq, ata_sff_interrupt, 0,
162 &pata_imx_sht);
163
164free_priv:
165 clk_disable(priv->clk);
166 clk_put(priv->clk);
167 return -ENOMEM;
168}
169
170static int __devexit pata_imx_remove(struct platform_device *pdev)
171{
172 struct ata_host *host = dev_get_drvdata(&pdev->dev);
173 struct pata_imx_priv *priv = host->private_data;
174
175 ata_host_detach(host);
176
177 __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
178
179 clk_disable(priv->clk);
180 clk_put(priv->clk);
181
182 return 0;
183}
184
185#ifdef CONFIG_PM
186static int pata_imx_suspend(struct device *dev)
187{
188 struct ata_host *host = dev_get_drvdata(dev);
189 struct pata_imx_priv *priv = host->private_data;
190 int ret;
191
192 ret = ata_host_suspend(host, PMSG_SUSPEND);
193 if (!ret) {
194 __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
195 priv->ata_ctl =
196 __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
197 clk_disable(priv->clk);
198 }
199
200 return ret;
201}
202
203static int pata_imx_resume(struct device *dev)
204{
205 struct ata_host *host = dev_get_drvdata(dev);
206 struct pata_imx_priv *priv = host->private_data;
207
208 clk_enable(priv->clk);
209
210 __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
211
212 __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
213 priv->host_regs + PATA_IMX_ATA_INT_EN);
214
215 ata_host_resume(host);
216
217 return 0;
218}
219
220static const struct dev_pm_ops pata_imx_pm_ops = {
221 .suspend = pata_imx_suspend,
222 .resume = pata_imx_resume,
223};
224#endif
225
226static struct platform_driver pata_imx_driver = {
227 .probe = pata_imx_probe,
228 .remove = __devexit_p(pata_imx_remove),
229 .driver = {
230 .name = DRV_NAME,
231 .owner = THIS_MODULE,
232#ifdef CONFIG_PM
233 .pm = &pata_imx_pm_ops,
234#endif
235 },
236};
237
238static int __init pata_imx_init(void)
239{
240 return platform_driver_register(&pata_imx_driver);
241}
242
243static void __exit pata_imx_exit(void)
244{
245 platform_driver_unregister(&pata_imx_driver);
246}
247module_init(pata_imx_init);
248module_exit(pata_imx_exit);
249
250MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
251MODULE_DESCRIPTION("low-level driver for iMX PATA");
252MODULE_LICENSE("GPL");
253MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 65e4be6be220..8e9f5048a10a 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -124,6 +124,17 @@ static const struct via_isa_bridge {
124 { NULL } 124 { NULL }
125}; 125};
126 126
127static const struct dmi_system_id no_atapi_dma_dmi_table[] = {
128 {
129 .ident = "AVERATEC 3200",
130 .matches = {
131 DMI_MATCH(DMI_BOARD_VENDOR, "AVERATEC"),
132 DMI_MATCH(DMI_BOARD_NAME, "3200"),
133 },
134 },
135 { }
136};
137
127struct via_port { 138struct via_port {
128 u8 cached_device; 139 u8 cached_device;
129}; 140};
@@ -355,6 +366,13 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
355 mask &= ~ ATA_MASK_UDMA; 366 mask &= ~ ATA_MASK_UDMA;
356 } 367 }
357 } 368 }
369
370 if (dev->class == ATA_DEV_ATAPI &&
371 dmi_check_system(no_atapi_dma_dmi_table)) {
372 ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
373 mask &= ATA_MASK_PIO;
374 }
375
358 return mask; 376 return mask;
359} 377}
360 378
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 0a9a774a7e1e..5c4237452f50 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1329,7 +1329,7 @@ static int sata_dwc_port_start(struct ata_port *ap)
1329 dev_err(ap->dev, "%s: dma_alloc_coherent failed\n", 1329 dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
1330 __func__); 1330 __func__);
1331 err = -ENOMEM; 1331 err = -ENOMEM;
1332 goto CLEANUP; 1332 goto CLEANUP_ALLOC;
1333 } 1333 }
1334 } 1334 }
1335 1335
@@ -1349,15 +1349,13 @@ static int sata_dwc_port_start(struct ata_port *ap)
1349 /* Clear any error bits before libata starts issuing commands */ 1349 /* Clear any error bits before libata starts issuing commands */
1350 clear_serror(); 1350 clear_serror();
1351 ap->private_data = hsdevp; 1351 ap->private_data = hsdevp;
1352 dev_dbg(ap->dev, "%s: done\n", __func__);
1353 return 0;
1352 1354
1355CLEANUP_ALLOC:
1356 kfree(hsdevp);
1353CLEANUP: 1357CLEANUP:
1354 if (err) { 1358 dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
1355 sata_dwc_port_stop(ap);
1356 dev_dbg(ap->dev, "%s: fail\n", __func__);
1357 } else {
1358 dev_dbg(ap->dev, "%s: done\n", __func__);
1359 }
1360
1361 return err; 1359 return err;
1362} 1360}
1363 1361
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 98c1d780f552..9dfb40b8c2c9 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -438,7 +438,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
438 u8 status; 438 u8 status;
439 439
440 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) { 440 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
441 u32 serror; 441 u32 serror = 0xffffffff;
442 442
443 /* SIEN doesn't mask SATA IRQs on some 3112s. Those 443 /* SIEN doesn't mask SATA IRQs on some 3112s. Those
444 * controllers continue to assert IRQ as long as 444 * controllers continue to assert IRQ as long as
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index cf7a0c788052..65cd74832450 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -397,6 +397,7 @@ static int remove_nodes(struct device *dev,
397 397
398static int release_nodes(struct device *dev, struct list_head *first, 398static int release_nodes(struct device *dev, struct list_head *first,
399 struct list_head *end, unsigned long flags) 399 struct list_head *end, unsigned long flags)
400 __releases(&dev->devres_lock)
400{ 401{
401 LIST_HEAD(todo); 402 LIST_HEAD(todo);
402 int cnt; 403 int cnt;
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 33e1bed68fdd..a4760e095ff5 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -376,7 +376,7 @@ int devtmpfs_mount(const char *mntdir)
376 return err; 376 return err;
377} 377}
378 378
379static __initdata DECLARE_COMPLETION(setup_done); 379static DECLARE_COMPLETION(setup_done);
380 380
381static int handle(const char *name, mode_t mode, struct device *dev) 381static int handle(const char *name, mode_t mode, struct device *dev)
382{ 382{
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index bbb03e6f7255..06ed6b4e7df5 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -521,11 +521,6 @@ static int _request_firmware(const struct firmware **firmware_p,
521 if (!firmware_p) 521 if (!firmware_p)
522 return -EINVAL; 522 return -EINVAL;
523 523
524 if (WARN_ON(usermodehelper_is_disabled())) {
525 dev_err(device, "firmware: %s will not be loaded\n", name);
526 return -EBUSY;
527 }
528
529 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); 524 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
530 if (!firmware) { 525 if (!firmware) {
531 dev_err(device, "%s: kmalloc(struct firmware) failed\n", 526 dev_err(device, "%s: kmalloc(struct firmware) failed\n",
@@ -539,6 +534,12 @@ static int _request_firmware(const struct firmware **firmware_p,
539 return 0; 534 return 0;
540 } 535 }
541 536
537 if (WARN_ON(usermodehelper_is_disabled())) {
538 dev_err(device, "firmware: %s will not be loaded\n", name);
539 retval = -EBUSY;
540 goto out;
541 }
542
542 if (uevent) 543 if (uevent)
543 dev_dbg(device, "firmware: requesting %s\n", name); 544 dev_dbg(device, "firmware: requesting %s\n", name);
544 545
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 0cad9c7f6bb5..99a5272d7c2f 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(platform_bus);
33 33
34/** 34/**
35 * arch_setup_pdev_archdata - Allow manipulation of archdata before its used 35 * arch_setup_pdev_archdata - Allow manipulation of archdata before its used
36 * @dev: platform device 36 * @pdev: platform device
37 * 37 *
38 * This is called before platform_device_add() such that any pdev_archdata may 38 * This is called before platform_device_add() such that any pdev_archdata may
39 * be setup before the platform_notifier is called. So if a user needs to 39 * be setup before the platform_notifier is called. So if a user needs to
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index a846b2f95cfb..b97294e2d95b 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -19,7 +19,7 @@
19 19
20struct pm_clk_data { 20struct pm_clk_data {
21 struct list_head clock_list; 21 struct list_head clock_list;
22 struct mutex lock; 22 spinlock_t lock;
23}; 23};
24 24
25enum pce_status { 25enum pce_status {
@@ -42,6 +42,22 @@ static struct pm_clk_data *__to_pcd(struct device *dev)
42} 42}
43 43
44/** 44/**
45 * pm_clk_acquire - Acquire a device clock.
46 * @dev: Device whose clock is to be acquired.
47 * @ce: PM clock entry corresponding to the clock.
48 */
49static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
50{
51 ce->clk = clk_get(dev, ce->con_id);
52 if (IS_ERR(ce->clk)) {
53 ce->status = PCE_STATUS_ERROR;
54 } else {
55 ce->status = PCE_STATUS_ACQUIRED;
56 dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
57 }
58}
59
60/**
45 * pm_clk_add - Start using a device clock for power management. 61 * pm_clk_add - Start using a device clock for power management.
46 * @dev: Device whose clock is going to be used for power management. 62 * @dev: Device whose clock is going to be used for power management.
47 * @con_id: Connection ID of the clock. 63 * @con_id: Connection ID of the clock.
@@ -73,26 +89,23 @@ int pm_clk_add(struct device *dev, const char *con_id)
73 } 89 }
74 } 90 }
75 91
76 mutex_lock(&pcd->lock); 92 pm_clk_acquire(dev, ce);
93
94 spin_lock_irq(&pcd->lock);
77 list_add_tail(&ce->node, &pcd->clock_list); 95 list_add_tail(&ce->node, &pcd->clock_list);
78 mutex_unlock(&pcd->lock); 96 spin_unlock_irq(&pcd->lock);
79 return 0; 97 return 0;
80} 98}
81 99
82/** 100/**
83 * __pm_clk_remove - Destroy PM clock entry. 101 * __pm_clk_remove - Destroy PM clock entry.
84 * @ce: PM clock entry to destroy. 102 * @ce: PM clock entry to destroy.
85 *
86 * This routine must be called under the mutex protecting the PM list of clocks
87 * corresponding the the @ce's device.
88 */ 103 */
89static void __pm_clk_remove(struct pm_clock_entry *ce) 104static void __pm_clk_remove(struct pm_clock_entry *ce)
90{ 105{
91 if (!ce) 106 if (!ce)
92 return; 107 return;
93 108
94 list_del(&ce->node);
95
96 if (ce->status < PCE_STATUS_ERROR) { 109 if (ce->status < PCE_STATUS_ERROR) {
97 if (ce->status == PCE_STATUS_ENABLED) 110 if (ce->status == PCE_STATUS_ENABLED)
98 clk_disable(ce->clk); 111 clk_disable(ce->clk);
@@ -123,21 +136,25 @@ void pm_clk_remove(struct device *dev, const char *con_id)
123 if (!pcd) 136 if (!pcd)
124 return; 137 return;
125 138
126 mutex_lock(&pcd->lock); 139 spin_lock_irq(&pcd->lock);
127 140
128 list_for_each_entry(ce, &pcd->clock_list, node) { 141 list_for_each_entry(ce, &pcd->clock_list, node) {
129 if (!con_id && !ce->con_id) { 142 if (!con_id && !ce->con_id)
130 __pm_clk_remove(ce); 143 goto remove;
131 break; 144 else if (!con_id || !ce->con_id)
132 } else if (!con_id || !ce->con_id) {
133 continue; 145 continue;
134 } else if (!strcmp(con_id, ce->con_id)) { 146 else if (!strcmp(con_id, ce->con_id))
135 __pm_clk_remove(ce); 147 goto remove;
136 break;
137 }
138 } 148 }
139 149
140 mutex_unlock(&pcd->lock); 150 spin_unlock_irq(&pcd->lock);
151 return;
152
153 remove:
154 list_del(&ce->node);
155 spin_unlock_irq(&pcd->lock);
156
157 __pm_clk_remove(ce);
141} 158}
142 159
143/** 160/**
@@ -158,7 +175,7 @@ int pm_clk_init(struct device *dev)
158 } 175 }
159 176
160 INIT_LIST_HEAD(&pcd->clock_list); 177 INIT_LIST_HEAD(&pcd->clock_list);
161 mutex_init(&pcd->lock); 178 spin_lock_init(&pcd->lock);
162 dev->power.subsys_data = pcd; 179 dev->power.subsys_data = pcd;
163 return 0; 180 return 0;
164} 181}
@@ -175,20 +192,27 @@ void pm_clk_destroy(struct device *dev)
175{ 192{
176 struct pm_clk_data *pcd = __to_pcd(dev); 193 struct pm_clk_data *pcd = __to_pcd(dev);
177 struct pm_clock_entry *ce, *c; 194 struct pm_clock_entry *ce, *c;
195 struct list_head list;
178 196
179 if (!pcd) 197 if (!pcd)
180 return; 198 return;
181 199
182 dev->power.subsys_data = NULL; 200 dev->power.subsys_data = NULL;
201 INIT_LIST_HEAD(&list);
183 202
184 mutex_lock(&pcd->lock); 203 spin_lock_irq(&pcd->lock);
185 204
186 list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) 205 list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
187 __pm_clk_remove(ce); 206 list_move(&ce->node, &list);
188 207
189 mutex_unlock(&pcd->lock); 208 spin_unlock_irq(&pcd->lock);
190 209
191 kfree(pcd); 210 kfree(pcd);
211
212 list_for_each_entry_safe_reverse(ce, c, &list, node) {
213 list_del(&ce->node);
214 __pm_clk_remove(ce);
215 }
192} 216}
193 217
194#endif /* CONFIG_PM */ 218#endif /* CONFIG_PM */
@@ -196,23 +220,6 @@ void pm_clk_destroy(struct device *dev)
196#ifdef CONFIG_PM_RUNTIME 220#ifdef CONFIG_PM_RUNTIME
197 221
198/** 222/**
199 * pm_clk_acquire - Acquire a device clock.
200 * @dev: Device whose clock is to be acquired.
201 * @con_id: Connection ID of the clock.
202 */
203static void pm_clk_acquire(struct device *dev,
204 struct pm_clock_entry *ce)
205{
206 ce->clk = clk_get(dev, ce->con_id);
207 if (IS_ERR(ce->clk)) {
208 ce->status = PCE_STATUS_ERROR;
209 } else {
210 ce->status = PCE_STATUS_ACQUIRED;
211 dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
212 }
213}
214
215/**
216 * pm_clk_suspend - Disable clocks in a device's PM clock list. 223 * pm_clk_suspend - Disable clocks in a device's PM clock list.
217 * @dev: Device to disable the clocks for. 224 * @dev: Device to disable the clocks for.
218 */ 225 */
@@ -220,25 +227,23 @@ int pm_clk_suspend(struct device *dev)
220{ 227{
221 struct pm_clk_data *pcd = __to_pcd(dev); 228 struct pm_clk_data *pcd = __to_pcd(dev);
222 struct pm_clock_entry *ce; 229 struct pm_clock_entry *ce;
230 unsigned long flags;
223 231
224 dev_dbg(dev, "%s()\n", __func__); 232 dev_dbg(dev, "%s()\n", __func__);
225 233
226 if (!pcd) 234 if (!pcd)
227 return 0; 235 return 0;
228 236
229 mutex_lock(&pcd->lock); 237 spin_lock_irqsave(&pcd->lock, flags);
230 238
231 list_for_each_entry_reverse(ce, &pcd->clock_list, node) { 239 list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
232 if (ce->status == PCE_STATUS_NONE)
233 pm_clk_acquire(dev, ce);
234
235 if (ce->status < PCE_STATUS_ERROR) { 240 if (ce->status < PCE_STATUS_ERROR) {
236 clk_disable(ce->clk); 241 clk_disable(ce->clk);
237 ce->status = PCE_STATUS_ACQUIRED; 242 ce->status = PCE_STATUS_ACQUIRED;
238 } 243 }
239 } 244 }
240 245
241 mutex_unlock(&pcd->lock); 246 spin_unlock_irqrestore(&pcd->lock, flags);
242 247
243 return 0; 248 return 0;
244} 249}
@@ -251,25 +256,23 @@ int pm_clk_resume(struct device *dev)
251{ 256{
252 struct pm_clk_data *pcd = __to_pcd(dev); 257 struct pm_clk_data *pcd = __to_pcd(dev);
253 struct pm_clock_entry *ce; 258 struct pm_clock_entry *ce;
259 unsigned long flags;
254 260
255 dev_dbg(dev, "%s()\n", __func__); 261 dev_dbg(dev, "%s()\n", __func__);
256 262
257 if (!pcd) 263 if (!pcd)
258 return 0; 264 return 0;
259 265
260 mutex_lock(&pcd->lock); 266 spin_lock_irqsave(&pcd->lock, flags);
261 267
262 list_for_each_entry(ce, &pcd->clock_list, node) { 268 list_for_each_entry(ce, &pcd->clock_list, node) {
263 if (ce->status == PCE_STATUS_NONE)
264 pm_clk_acquire(dev, ce);
265
266 if (ce->status < PCE_STATUS_ERROR) { 269 if (ce->status < PCE_STATUS_ERROR) {
267 clk_enable(ce->clk); 270 clk_enable(ce->clk);
268 ce->status = PCE_STATUS_ENABLED; 271 ce->status = PCE_STATUS_ENABLED;
269 } 272 }
270 } 273 }
271 274
272 mutex_unlock(&pcd->lock); 275 spin_unlock_irqrestore(&pcd->lock, flags);
273 276
274 return 0; 277 return 0;
275} 278}
@@ -344,6 +347,7 @@ int pm_clk_suspend(struct device *dev)
344{ 347{
345 struct pm_clk_data *pcd = __to_pcd(dev); 348 struct pm_clk_data *pcd = __to_pcd(dev);
346 struct pm_clock_entry *ce; 349 struct pm_clock_entry *ce;
350 unsigned long flags;
347 351
348 dev_dbg(dev, "%s()\n", __func__); 352 dev_dbg(dev, "%s()\n", __func__);
349 353
@@ -351,12 +355,12 @@ int pm_clk_suspend(struct device *dev)
351 if (!pcd || !dev->driver) 355 if (!pcd || !dev->driver)
352 return 0; 356 return 0;
353 357
354 mutex_lock(&pcd->lock); 358 spin_lock_irqsave(&pcd->lock, flags);
355 359
356 list_for_each_entry_reverse(ce, &pcd->clock_list, node) 360 list_for_each_entry_reverse(ce, &pcd->clock_list, node)
357 clk_disable(ce->clk); 361 clk_disable(ce->clk);
358 362
359 mutex_unlock(&pcd->lock); 363 spin_unlock_irqrestore(&pcd->lock, flags);
360 364
361 return 0; 365 return 0;
362} 366}
@@ -369,6 +373,7 @@ int pm_clk_resume(struct device *dev)
369{ 373{
370 struct pm_clk_data *pcd = __to_pcd(dev); 374 struct pm_clk_data *pcd = __to_pcd(dev);
371 struct pm_clock_entry *ce; 375 struct pm_clock_entry *ce;
376 unsigned long flags;
372 377
373 dev_dbg(dev, "%s()\n", __func__); 378 dev_dbg(dev, "%s()\n", __func__);
374 379
@@ -376,12 +381,12 @@ int pm_clk_resume(struct device *dev)
376 if (!pcd || !dev->driver) 381 if (!pcd || !dev->driver)
377 return 0; 382 return 0;
378 383
379 mutex_lock(&pcd->lock); 384 spin_lock_irqsave(&pcd->lock, flags);
380 385
381 list_for_each_entry(ce, &pcd->clock_list, node) 386 list_for_each_entry(ce, &pcd->clock_list, node)
382 clk_enable(ce->clk); 387 clk_enable(ce->clk);
383 388
384 mutex_unlock(&pcd->lock); 389 spin_unlock_irqrestore(&pcd->lock, flags);
385 390
386 return 0; 391 return 0;
387} 392}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e18566a0fedd..1c374579407c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -460,6 +460,21 @@ static int pm_genpd_runtime_resume(struct device *dev)
460 return 0; 460 return 0;
461} 461}
462 462
463/**
464 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
465 */
466void pm_genpd_poweroff_unused(void)
467{
468 struct generic_pm_domain *genpd;
469
470 mutex_lock(&gpd_list_lock);
471
472 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
473 genpd_queue_power_off_work(genpd);
474
475 mutex_unlock(&gpd_list_lock);
476}
477
463#else 478#else
464 479
465static inline void genpd_power_off_work_fn(struct work_struct *work) {} 480static inline void genpd_power_off_work_fn(struct work_struct *work) {}
@@ -1255,18 +1270,3 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
1255 list_add(&genpd->gpd_list_node, &gpd_list); 1270 list_add(&genpd->gpd_list_node, &gpd_list);
1256 mutex_unlock(&gpd_list_lock); 1271 mutex_unlock(&gpd_list_lock);
1257} 1272}
1258
1259/**
1260 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
1261 */
1262void pm_genpd_poweroff_unused(void)
1263{
1264 struct generic_pm_domain *genpd;
1265
1266 mutex_lock(&gpd_list_lock);
1267
1268 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1269 genpd_queue_power_off_work(genpd);
1270
1271 mutex_unlock(&gpd_list_lock);
1272}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index c2231ff06cbc..c4f7a45cd2c3 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -113,3 +113,4 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c,
113} 113}
114EXPORT_SYMBOL_GPL(regmap_init_i2c); 114EXPORT_SYMBOL_GPL(regmap_init_i2c);
115 115
116MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 4deba0621bc7..f8396945d6ed 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -13,6 +13,7 @@
13#include <linux/regmap.h> 13#include <linux/regmap.h>
14#include <linux/spi/spi.h> 14#include <linux/spi/spi.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/module.h>
16 17
17static int regmap_spi_write(struct device *dev, const void *data, size_t count) 18static int regmap_spi_write(struct device *dev, const void *data, size_t count)
18{ 19{
@@ -70,3 +71,5 @@ struct regmap *regmap_init_spi(struct spi_device *spi,
70 return regmap_init(&spi->dev, &regmap_spi, config); 71 return regmap_init(&spi->dev, &regmap_spi, config);
71} 72}
72EXPORT_SYMBOL_GPL(regmap_init_spi); 73EXPORT_SYMBOL_GPL(regmap_init_spi);
74
75MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index cf3565cae93d..20663f8dae45 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,
168 map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL); 168 map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
169 if (map->work_buf == NULL) { 169 if (map->work_buf == NULL) {
170 ret = -ENOMEM; 170 ret = -ENOMEM;
171 goto err_bus; 171 goto err_map;
172 } 172 }
173 173
174 return map; 174 return map;
175 175
176err_bus:
177 module_put(map->bus->owner);
178err_map: 176err_map:
179 kfree(map); 177 kfree(map);
180err: 178err:
@@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);
188void regmap_exit(struct regmap *map) 186void regmap_exit(struct regmap *map)
189{ 187{
190 kfree(map->work_buf); 188 kfree(map->work_buf);
191 module_put(map->bus->owner);
192 kfree(map); 189 kfree(map);
193} 190}
194EXPORT_SYMBOL_GPL(regmap_exit); 191EXPORT_SYMBOL_GPL(regmap_exit);
@@ -317,7 +314,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
317 u8[0] |= map->bus->read_flag_mask; 314 u8[0] |= map->bus->read_flag_mask;
318 315
319 ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes, 316 ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes,
320 val, map->format.val_bytes); 317 val, val_len);
321 if (ret != 0) 318 if (ret != 0)
322 return ret; 319 return ret;
323 320
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 873e2e4ac55f..73b7b1a18fab 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -15,6 +15,7 @@ MODULE_LICENSE("GPL");
15static int bcma_bus_match(struct device *dev, struct device_driver *drv); 15static int bcma_bus_match(struct device *dev, struct device_driver *drv);
16static int bcma_device_probe(struct device *dev); 16static int bcma_device_probe(struct device *dev);
17static int bcma_device_remove(struct device *dev); 17static int bcma_device_remove(struct device *dev);
18static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
18 19
19static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf) 20static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
20{ 21{
@@ -49,6 +50,7 @@ static struct bus_type bcma_bus_type = {
49 .match = bcma_bus_match, 50 .match = bcma_bus_match,
50 .probe = bcma_device_probe, 51 .probe = bcma_device_probe,
51 .remove = bcma_device_remove, 52 .remove = bcma_device_remove,
53 .uevent = bcma_device_uevent,
52 .dev_attrs = bcma_device_attrs, 54 .dev_attrs = bcma_device_attrs,
53}; 55};
54 56
@@ -227,6 +229,16 @@ static int bcma_device_remove(struct device *dev)
227 return 0; 229 return 0;
228} 230}
229 231
232static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
233{
234 struct bcma_device *core = container_of(dev, struct bcma_device, dev);
235
236 return add_uevent_var(env,
237 "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
238 core->id.manuf, core->id.id,
239 core->id.rev, core->id.class);
240}
241
230static int __init bcma_modinit(void) 242static int __init bcma_modinit(void)
231{ 243{
232 int err; 244 int err;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 717d6e4e18d3..6f07ec1c2f58 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -256,6 +256,21 @@ config BLK_DEV_LOOP
256 256
257 Most users will answer N here. 257 Most users will answer N here.
258 258
259config BLK_DEV_LOOP_MIN_COUNT
260 int "Number of loop devices to pre-create at init time"
261 depends on BLK_DEV_LOOP
262 default 8
263 help
264 Static number of loop devices to be unconditionally pre-created
265 at init time.
266
267 This default value can be overwritten on the kernel command
268 line or with module-parameter loop.max_loop.
269
270 The historic default is 8. If a late 2011 version of losetup(8)
271 is used, it can be set to 0, since needed loop devices can be
272 dynamically allocated with the /dev/loop-control interface.
273
259config BLK_DEV_CRYPTOLOOP 274config BLK_DEV_CRYPTOLOOP
260 tristate "Cryptoloop Support" 275 tristate "Cryptoloop Support"
261 select CRYPTO 276 select CRYPTO
@@ -471,7 +486,7 @@ config XEN_BLKDEV_FRONTEND
471 in another domain which drives the actual block device. 486 in another domain which drives the actual block device.
472 487
473config XEN_BLKDEV_BACKEND 488config XEN_BLKDEV_BACKEND
474 tristate "Block-device backend driver" 489 tristate "Xen block-device backend driver"
475 depends on XEN_BACKEND 490 depends on XEN_BACKEND
476 help 491 help
477 The block-device backend driver allows the kernel to export its 492 The block-device backend driver allows the kernel to export its
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 515bcd948a43..0feab261e295 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1829,10 +1829,10 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
1829 1829
1830 /* silently ignore cpu mask on UP kernel */ 1830 /* silently ignore cpu mask on UP kernel */
1831 if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) { 1831 if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
1832 err = __bitmap_parse(sc.cpu_mask, 32, 0, 1832 err = bitmap_parse(sc.cpu_mask, 32,
1833 cpumask_bits(new_cpu_mask), nr_cpu_ids); 1833 cpumask_bits(new_cpu_mask), nr_cpu_ids);
1834 if (err) { 1834 if (err) {
1835 dev_warn(DEV, "__bitmap_parse() failed with %d\n", err); 1835 dev_warn(DEV, "bitmap_parse() failed with %d\n", err);
1836 retcode = ERR_CPU_MASK_PARSE; 1836 retcode = ERR_CPU_MASK_PARSE;
1837 goto fail; 1837 goto fail;
1838 } 1838 }
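Editor's note: bitmap_parse() is the kernel-buffer wrapper around __bitmap_parse() (which takes an extra is_user flag), so the call above no longer needs the internal helper. A minimal sketch of parsing a hex mask string into a cpumask, assuming a NUL-terminated kernel buffer:

    #include <linux/bitmap.h>
    #include <linux/cpumask.h>
    #include <linux/string.h>

    static int parse_cpu_mask(const char *buf, struct cpumask *mask)
    {
            /* bitmap_parse(buf, buflen, maskp, nmaskbits) wraps
             * __bitmap_parse(buf, buflen, 0, maskp, nmaskbits). */
            return bitmap_parse(buf, strlen(buf),
                                cpumask_bits(mask), nr_cpu_ids);
    }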
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 98de8f418676..9955a53733b2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4250,7 +4250,7 @@ static int __init floppy_init(void)
4250 use_virtual_dma = can_use_virtual_dma & 1; 4250 use_virtual_dma = can_use_virtual_dma & 1;
4251 fdc_state[0].address = FDC1; 4251 fdc_state[0].address = FDC1;
4252 if (fdc_state[0].address == -1) { 4252 if (fdc_state[0].address == -1) {
4253 del_timer(&fd_timeout); 4253 del_timer_sync(&fd_timeout);
4254 err = -ENODEV; 4254 err = -ENODEV;
4255 goto out_unreg_region; 4255 goto out_unreg_region;
4256 } 4256 }
@@ -4261,7 +4261,7 @@ static int __init floppy_init(void)
4261 fdc = 0; /* reset fdc in case of unexpected interrupt */ 4261 fdc = 0; /* reset fdc in case of unexpected interrupt */
4262 err = floppy_grab_irq_and_dma(); 4262 err = floppy_grab_irq_and_dma();
4263 if (err) { 4263 if (err) {
4264 del_timer(&fd_timeout); 4264 del_timer_sync(&fd_timeout);
4265 err = -EBUSY; 4265 err = -EBUSY;
4266 goto out_unreg_region; 4266 goto out_unreg_region;
4267 } 4267 }
@@ -4318,7 +4318,7 @@ static int __init floppy_init(void)
4318 user_reset_fdc(-1, FD_RESET_ALWAYS, false); 4318 user_reset_fdc(-1, FD_RESET_ALWAYS, false);
4319 } 4319 }
4320 fdc = 0; 4320 fdc = 0;
4321 del_timer(&fd_timeout); 4321 del_timer_sync(&fd_timeout);
4322 current_drive = 0; 4322 current_drive = 0;
4323 initialized = true; 4323 initialized = true;
4324 if (have_no_fdc) { 4324 if (have_no_fdc) {
@@ -4368,7 +4368,7 @@ out_unreg_blkdev:
4368 unregister_blkdev(FLOPPY_MAJOR, "fd"); 4368 unregister_blkdev(FLOPPY_MAJOR, "fd");
4369out_put_disk: 4369out_put_disk:
4370 while (dr--) { 4370 while (dr--) {
4371 del_timer(&motor_off_timer[dr]); 4371 del_timer_sync(&motor_off_timer[dr]);
4372 if (disks[dr]->queue) 4372 if (disks[dr]->queue)
4373 blk_cleanup_queue(disks[dr]->queue); 4373 blk_cleanup_queue(disks[dr]->queue);
4374 put_disk(disks[dr]); 4374 put_disk(disks[dr]);
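Editor's note: the floppy hunks above replace del_timer() with del_timer_sync() on init-failure and teardown paths. The difference matters on SMP: del_timer() only deactivates a pending timer, while del_timer_sync() also waits for a handler that is already running on another CPU. The teardown idiom, with hypothetical names:

    #include <linux/timer.h>

    static struct timer_list my_timeout;    /* hypothetical timer */

    static void my_teardown(void)
    {
            /*
             * del_timer() returns immediately even if the callback is
             * mid-flight on another CPU; del_timer_sync() waits for it.
             * It must not be called from the callback itself, nor while
             * holding a lock the callback takes.
             */
            del_timer_sync(&my_timeout);
    }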
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 76c8da78212b..4720c7ade0ae 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -75,11 +75,11 @@
75#include <linux/kthread.h> 75#include <linux/kthread.h>
76#include <linux/splice.h> 76#include <linux/splice.h>
77#include <linux/sysfs.h> 77#include <linux/sysfs.h>
78 78#include <linux/miscdevice.h>
79#include <asm/uaccess.h> 79#include <asm/uaccess.h>
80 80
81static LIST_HEAD(loop_devices); 81static DEFINE_IDR(loop_index_idr);
82static DEFINE_MUTEX(loop_devices_mutex); 82static DEFINE_MUTEX(loop_index_mutex);
83 83
84static int max_part; 84static int max_part;
85static int part_shift; 85static int part_shift;
@@ -722,17 +722,10 @@ static inline int is_loop_device(struct file *file)
722static ssize_t loop_attr_show(struct device *dev, char *page, 722static ssize_t loop_attr_show(struct device *dev, char *page,
723 ssize_t (*callback)(struct loop_device *, char *)) 723 ssize_t (*callback)(struct loop_device *, char *))
724{ 724{
725 struct loop_device *l, *lo = NULL; 725 struct gendisk *disk = dev_to_disk(dev);
726 726 struct loop_device *lo = disk->private_data;
727 mutex_lock(&loop_devices_mutex);
728 list_for_each_entry(l, &loop_devices, lo_list)
729 if (disk_to_dev(l->lo_disk) == dev) {
730 lo = l;
731 break;
732 }
733 mutex_unlock(&loop_devices_mutex);
734 727
735 return lo ? callback(lo, page) : -EIO; 728 return callback(lo, page);
736} 729}
737 730
738#define LOOP_ATTR_RO(_name) \ 731#define LOOP_ATTR_RO(_name) \
@@ -750,10 +743,10 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
750 ssize_t ret; 743 ssize_t ret;
751 char *p = NULL; 744 char *p = NULL;
752 745
753 mutex_lock(&lo->lo_ctl_mutex); 746 spin_lock_irq(&lo->lo_lock);
754 if (lo->lo_backing_file) 747 if (lo->lo_backing_file)
755 p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1); 748 p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1);
756 mutex_unlock(&lo->lo_ctl_mutex); 749 spin_unlock_irq(&lo->lo_lock);
757 750
758 if (IS_ERR_OR_NULL(p)) 751 if (IS_ERR_OR_NULL(p))
759 ret = PTR_ERR(p); 752 ret = PTR_ERR(p);
@@ -1007,7 +1000,9 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
1007 1000
1008 kthread_stop(lo->lo_thread); 1001 kthread_stop(lo->lo_thread);
1009 1002
1003 spin_lock_irq(&lo->lo_lock);
1010 lo->lo_backing_file = NULL; 1004 lo->lo_backing_file = NULL;
1005 spin_unlock_irq(&lo->lo_lock);
1011 1006
1012 loop_release_xfer(lo); 1007 loop_release_xfer(lo);
1013 lo->transfer = NULL; 1008 lo->transfer = NULL;
@@ -1485,13 +1480,22 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1485 1480
1486static int lo_open(struct block_device *bdev, fmode_t mode) 1481static int lo_open(struct block_device *bdev, fmode_t mode)
1487{ 1482{
1488 struct loop_device *lo = bdev->bd_disk->private_data; 1483 struct loop_device *lo;
1484 int err = 0;
1485
1486 mutex_lock(&loop_index_mutex);
1487 lo = bdev->bd_disk->private_data;
1488 if (!lo) {
1489 err = -ENXIO;
1490 goto out;
1491 }
1489 1492
1490 mutex_lock(&lo->lo_ctl_mutex); 1493 mutex_lock(&lo->lo_ctl_mutex);
1491 lo->lo_refcnt++; 1494 lo->lo_refcnt++;
1492 mutex_unlock(&lo->lo_ctl_mutex); 1495 mutex_unlock(&lo->lo_ctl_mutex);
1493 1496out:
1494 return 0; 1497 mutex_unlock(&loop_index_mutex);
1498 return err;
1495} 1499}
1496 1500
1497static int lo_release(struct gendisk *disk, fmode_t mode) 1501static int lo_release(struct gendisk *disk, fmode_t mode)
@@ -1557,40 +1561,71 @@ int loop_register_transfer(struct loop_func_table *funcs)
1557 return 0; 1561 return 0;
1558} 1562}
1559 1563
1564static int unregister_transfer_cb(int id, void *ptr, void *data)
1565{
1566 struct loop_device *lo = ptr;
1567 struct loop_func_table *xfer = data;
1568
1569 mutex_lock(&lo->lo_ctl_mutex);
1570 if (lo->lo_encryption == xfer)
1571 loop_release_xfer(lo);
1572 mutex_unlock(&lo->lo_ctl_mutex);
1573 return 0;
1574}
1575
1560int loop_unregister_transfer(int number) 1576int loop_unregister_transfer(int number)
1561{ 1577{
1562 unsigned int n = number; 1578 unsigned int n = number;
1563 struct loop_device *lo;
1564 struct loop_func_table *xfer; 1579 struct loop_func_table *xfer;
1565 1580
1566 if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) 1581 if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
1567 return -EINVAL; 1582 return -EINVAL;
1568 1583
1569 xfer_funcs[n] = NULL; 1584 xfer_funcs[n] = NULL;
1570 1585 idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
1571 list_for_each_entry(lo, &loop_devices, lo_list) {
1572 mutex_lock(&lo->lo_ctl_mutex);
1573
1574 if (lo->lo_encryption == xfer)
1575 loop_release_xfer(lo);
1576
1577 mutex_unlock(&lo->lo_ctl_mutex);
1578 }
1579
1580 return 0; 1586 return 0;
1581} 1587}
1582 1588
1583EXPORT_SYMBOL(loop_register_transfer); 1589EXPORT_SYMBOL(loop_register_transfer);
1584EXPORT_SYMBOL(loop_unregister_transfer); 1590EXPORT_SYMBOL(loop_unregister_transfer);
1585 1591
1586static struct loop_device *loop_alloc(int i) 1592static int loop_add(struct loop_device **l, int i)
1587{ 1593{
1588 struct loop_device *lo; 1594 struct loop_device *lo;
1589 struct gendisk *disk; 1595 struct gendisk *disk;
1596 int err;
1590 1597
1591 lo = kzalloc(sizeof(*lo), GFP_KERNEL); 1598 lo = kzalloc(sizeof(*lo), GFP_KERNEL);
1592 if (!lo) 1599 if (!lo) {
1600 err = -ENOMEM;
1593 goto out; 1601 goto out;
1602 }
1603
1604 err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
1605 if (err < 0)
1606 goto out_free_dev;
1607
1608 if (i >= 0) {
1609 int m;
1610
1611 /* create specific i in the index */
1612 err = idr_get_new_above(&loop_index_idr, lo, i, &m);
1613 if (err >= 0 && i != m) {
1614 idr_remove(&loop_index_idr, m);
1615 err = -EEXIST;
1616 }
1617 } else if (i == -1) {
1618 int m;
1619
1620 /* get next free nr */
1621 err = idr_get_new(&loop_index_idr, lo, &m);
1622 if (err >= 0)
1623 i = m;
1624 } else {
1625 err = -EINVAL;
1626 }
1627 if (err < 0)
1628 goto out_free_dev;
1594 1629
1595 lo->lo_queue = blk_alloc_queue(GFP_KERNEL); 1630 lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
1596 if (!lo->lo_queue) 1631 if (!lo->lo_queue)
@@ -1611,81 +1646,158 @@ static struct loop_device *loop_alloc(int i)
1611 disk->private_data = lo; 1646 disk->private_data = lo;
1612 disk->queue = lo->lo_queue; 1647 disk->queue = lo->lo_queue;
1613 sprintf(disk->disk_name, "loop%d", i); 1648 sprintf(disk->disk_name, "loop%d", i);
1614 return lo; 1649 add_disk(disk);
1650 *l = lo;
1651 return lo->lo_number;
1615 1652
1616out_free_queue: 1653out_free_queue:
1617 blk_cleanup_queue(lo->lo_queue); 1654 blk_cleanup_queue(lo->lo_queue);
1618out_free_dev: 1655out_free_dev:
1619 kfree(lo); 1656 kfree(lo);
1620out: 1657out:
1621 return NULL; 1658 return err;
1622} 1659}
1623 1660
1624static void loop_free(struct loop_device *lo) 1661static void loop_remove(struct loop_device *lo)
1625{ 1662{
1663 del_gendisk(lo->lo_disk);
1626 blk_cleanup_queue(lo->lo_queue); 1664 blk_cleanup_queue(lo->lo_queue);
1627 put_disk(lo->lo_disk); 1665 put_disk(lo->lo_disk);
1628 list_del(&lo->lo_list);
1629 kfree(lo); 1666 kfree(lo);
1630} 1667}
1631 1668
1632static struct loop_device *loop_init_one(int i) 1669static int find_free_cb(int id, void *ptr, void *data)
1670{
1671 struct loop_device *lo = ptr;
1672 struct loop_device **l = data;
1673
1674 if (lo->lo_state == Lo_unbound) {
1675 *l = lo;
1676 return 1;
1677 }
1678 return 0;
1679}
1680
1681static int loop_lookup(struct loop_device **l, int i)
1633{ 1682{
1634 struct loop_device *lo; 1683 struct loop_device *lo;
1684 int ret = -ENODEV;
1635 1685
1636 list_for_each_entry(lo, &loop_devices, lo_list) { 1686 if (i < 0) {
1637 if (lo->lo_number == i) 1687 int err;
1638 return lo; 1688
1689 err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
1690 if (err == 1) {
1691 *l = lo;
1692 ret = lo->lo_number;
1693 }
1694 goto out;
1639 } 1695 }
1640 1696
1641 lo = loop_alloc(i); 1697 /* lookup and return a specific i */
1698 lo = idr_find(&loop_index_idr, i);
1642 if (lo) { 1699 if (lo) {
1643 add_disk(lo->lo_disk); 1700 *l = lo;
1644 list_add_tail(&lo->lo_list, &loop_devices); 1701 ret = lo->lo_number;
1645 } 1702 }
1646 return lo; 1703out:
1647} 1704 return ret;
1648
1649static void loop_del_one(struct loop_device *lo)
1650{
1651 del_gendisk(lo->lo_disk);
1652 loop_free(lo);
1653} 1705}
1654 1706
1655static struct kobject *loop_probe(dev_t dev, int *part, void *data) 1707static struct kobject *loop_probe(dev_t dev, int *part, void *data)
1656{ 1708{
1657 struct loop_device *lo; 1709 struct loop_device *lo;
1658 struct kobject *kobj; 1710 struct kobject *kobj;
1711 int err;
1659 1712
1660 mutex_lock(&loop_devices_mutex); 1713 mutex_lock(&loop_index_mutex);
1661 lo = loop_init_one(MINOR(dev) >> part_shift); 1714 err = loop_lookup(&lo, MINOR(dev) >> part_shift);
1662 kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM); 1715 if (err < 0)
1663 mutex_unlock(&loop_devices_mutex); 1716 err = loop_add(&lo, MINOR(dev) >> part_shift);
1717 if (err < 0)
1718 kobj = ERR_PTR(err);
1719 else
1720 kobj = get_disk(lo->lo_disk);
1721 mutex_unlock(&loop_index_mutex);
1664 1722
1665 *part = 0; 1723 *part = 0;
1666 return kobj; 1724 return kobj;
1667} 1725}
1668 1726
1727static long loop_control_ioctl(struct file *file, unsigned int cmd,
1728 unsigned long parm)
1729{
1730 struct loop_device *lo;
1731 int ret = -ENOSYS;
1732
1733 mutex_lock(&loop_index_mutex);
1734 switch (cmd) {
1735 case LOOP_CTL_ADD:
1736 ret = loop_lookup(&lo, parm);
1737 if (ret >= 0) {
1738 ret = -EEXIST;
1739 break;
1740 }
1741 ret = loop_add(&lo, parm);
1742 break;
1743 case LOOP_CTL_REMOVE:
1744 ret = loop_lookup(&lo, parm);
1745 if (ret < 0)
1746 break;
1747 mutex_lock(&lo->lo_ctl_mutex);
1748 if (lo->lo_state != Lo_unbound) {
1749 ret = -EBUSY;
1750 mutex_unlock(&lo->lo_ctl_mutex);
1751 break;
1752 }
1753 if (lo->lo_refcnt > 0) {
1754 ret = -EBUSY;
1755 mutex_unlock(&lo->lo_ctl_mutex);
1756 break;
1757 }
1758 lo->lo_disk->private_data = NULL;
1759 mutex_unlock(&lo->lo_ctl_mutex);
1760 idr_remove(&loop_index_idr, lo->lo_number);
1761 loop_remove(lo);
1762 break;
1763 case LOOP_CTL_GET_FREE:
1764 ret = loop_lookup(&lo, -1);
1765 if (ret >= 0)
1766 break;
1767 ret = loop_add(&lo, -1);
1768 }
1769 mutex_unlock(&loop_index_mutex);
1770
1771 return ret;
1772}
1773
1774static const struct file_operations loop_ctl_fops = {
1775 .open = nonseekable_open,
1776 .unlocked_ioctl = loop_control_ioctl,
1777 .compat_ioctl = loop_control_ioctl,
1778 .owner = THIS_MODULE,
1779 .llseek = noop_llseek,
1780};
1781
1782static struct miscdevice loop_misc = {
1783 .minor = LOOP_CTRL_MINOR,
1784 .name = "loop-control",
1785 .fops = &loop_ctl_fops,
1786};
1787
1788MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
1789MODULE_ALIAS("devname:loop-control");
1790
1669static int __init loop_init(void) 1791static int __init loop_init(void)
1670{ 1792{
1671 int i, nr; 1793 int i, nr;
1672 unsigned long range; 1794 unsigned long range;
1673 struct loop_device *lo, *next; 1795 struct loop_device *lo;
1796 int err;
1674 1797
1675 /* 1798 err = misc_register(&loop_misc);
1676 * loop module now has a feature to instantiate underlying device 1799 if (err < 0)
1677 * structure on-demand, provided that there is an access dev node. 1800 return err;
1678 * However, this will not work well with user space tool that doesn't
1679 * know about such "feature". In order to not break any existing
1680 * tool, we do the following:
1681 *
1682 * (1) if max_loop is specified, create that many upfront, and this
1683 * also becomes a hard limit.
1684 * (2) if max_loop is not specified, create 8 loop device on module
1685 * load, user can further extend loop device by create dev node
1686 * themselves and have kernel automatically instantiate actual
1687 * device on-demand.
1688 */
1689 1801
1690 part_shift = 0; 1802 part_shift = 0;
1691 if (max_part > 0) { 1803 if (max_part > 0) {
@@ -1708,57 +1820,60 @@ static int __init loop_init(void)
1708 if (max_loop > 1UL << (MINORBITS - part_shift)) 1820 if (max_loop > 1UL << (MINORBITS - part_shift))
1709 return -EINVAL; 1821 return -EINVAL;
1710 1822
1823 /*
1824 * If max_loop is specified, create that many devices upfront.
1825 * This also becomes a hard limit. If max_loop is not specified,
1826 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
1827 * init time. Loop devices can be requested on-demand with the
1828 * /dev/loop-control interface, or be instantiated by accessing
1829 * a 'dead' device node.
1830 */
1711 if (max_loop) { 1831 if (max_loop) {
1712 nr = max_loop; 1832 nr = max_loop;
1713 range = max_loop << part_shift; 1833 range = max_loop << part_shift;
1714 } else { 1834 } else {
1715 nr = 8; 1835 nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
1716 range = 1UL << MINORBITS; 1836 range = 1UL << MINORBITS;
1717 } 1837 }
1718 1838
1719 if (register_blkdev(LOOP_MAJOR, "loop")) 1839 if (register_blkdev(LOOP_MAJOR, "loop"))
1720 return -EIO; 1840 return -EIO;
1721 1841
1722 for (i = 0; i < nr; i++) {
1723 lo = loop_alloc(i);
1724 if (!lo)
1725 goto Enomem;
1726 list_add_tail(&lo->lo_list, &loop_devices);
1727 }
1728
1729 /* point of no return */
1730
1731 list_for_each_entry(lo, &loop_devices, lo_list)
1732 add_disk(lo->lo_disk);
1733
1734 blk_register_region(MKDEV(LOOP_MAJOR, 0), range, 1842 blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
1735 THIS_MODULE, loop_probe, NULL, NULL); 1843 THIS_MODULE, loop_probe, NULL, NULL);
1736 1844
1845 /* pre-create number of devices given by config or max_loop */
1846 mutex_lock(&loop_index_mutex);
1847 for (i = 0; i < nr; i++)
1848 loop_add(&lo, i);
1849 mutex_unlock(&loop_index_mutex);
1850
1737 printk(KERN_INFO "loop: module loaded\n"); 1851 printk(KERN_INFO "loop: module loaded\n");
1738 return 0; 1852 return 0;
1853}
1739 1854
1740Enomem: 1855static int loop_exit_cb(int id, void *ptr, void *data)
1741 printk(KERN_INFO "loop: out of memory\n"); 1856{
1742 1857 struct loop_device *lo = ptr;
1743 list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
1744 loop_free(lo);
1745 1858
1746 unregister_blkdev(LOOP_MAJOR, "loop"); 1859 loop_remove(lo);
1747 return -ENOMEM; 1860 return 0;
1748} 1861}
1749 1862
1750static void __exit loop_exit(void) 1863static void __exit loop_exit(void)
1751{ 1864{
1752 unsigned long range; 1865 unsigned long range;
1753 struct loop_device *lo, *next;
1754 1866
1755 range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; 1867 range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
1756 1868
1757 list_for_each_entry_safe(lo, next, &loop_devices, lo_list) 1869 idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
1758 loop_del_one(lo); 1870 idr_remove_all(&loop_index_idr);
1871 idr_destroy(&loop_index_idr);
1759 1872
1760 blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); 1873 blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
1761 unregister_blkdev(LOOP_MAJOR, "loop"); 1874 unregister_blkdev(LOOP_MAJOR, "loop");
1875
1876 misc_deregister(&loop_misc);
1762} 1877}
1763 1878
1764module_init(loop_init); 1879module_init(loop_init);
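Editor's note: from userspace the new /dev/loop-control node is driven with plain ioctls. LOOP_CTL_GET_FREE returns the number of the first unbound device (adding one if necessary), while LOOP_CTL_ADD and LOOP_CTL_REMOVE manage specific indices. A small, self-contained example, assuming a kernel with this patch and a <linux/loop.h> that defines the constants:

    /* cc -o loopfree loopfree.c */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/loop.h>  /* LOOP_CTL_GET_FREE, LOOP_CTL_ADD, LOOP_CTL_REMOVE */

    int main(void)
    {
            int ctl, nr;

            ctl = open("/dev/loop-control", O_RDWR);
            if (ctl < 0) {
                    perror("open /dev/loop-control");
                    return 1;
            }

            nr = ioctl(ctl, LOOP_CTL_GET_FREE);
            if (nr < 0)
                    perror("LOOP_CTL_GET_FREE");
            else
                    printf("first free loop device: /dev/loop%d\n", nr);

            close(ctl);
            return nr < 0;
    }

This is what a late 2011 losetup(8) uses instead of relying on pre-created device nodes.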
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 773bfa792777..ae3e167e17ad 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1184,6 +1184,7 @@ static struct of_device_id swim3_match[] =
1184 { 1184 {
1185 .compatible = "swim3" 1185 .compatible = "swim3"
1186 }, 1186 },
1187 { /* end of list */ }
1187}; 1188};
1188 1189
1189static struct macio_driver swim3_driver = 1190static struct macio_driver swim3_driver =
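Editor's note: the swim3 fix adds the empty terminator that the OF match-table walkers depend on; the table is scanned until an all-zero entry, so omitting the sentinel lets the walk run off the end of the array. Minimal sketch with a hypothetical compatible string:

    #include <linux/mod_devicetable.h>

    static const struct of_device_id my_match[] = {
            { .compatible = "acme,widget" },   /* hypothetical */
            { /* end of list */ }
    };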
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 9e40b283a468..00c57c90e2d6 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -46,7 +46,7 @@
46 46
47#define DRV_PFX "xen-blkback:" 47#define DRV_PFX "xen-blkback:"
48#define DPRINTK(fmt, args...) \ 48#define DPRINTK(fmt, args...) \
49 pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \ 49 pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \
50 __func__, __LINE__, ##args) 50 __func__, __LINE__, ##args)
51 51
52 52
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3f129b45451a..5fd2010f7d2b 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -590,7 +590,7 @@ static void frontend_changed(struct xenbus_device *dev,
590 590
591 /* 591 /*
592 * Enforce precondition before potential leak point. 592 * Enforce precondition before potential leak point.
593 * blkif_disconnect() is idempotent. 593 * xen_blkif_disconnect() is idempotent.
594 */ 594 */
595 xen_blkif_disconnect(be->blkif); 595 xen_blkif_disconnect(be->blkif);
596 596
@@ -601,17 +601,17 @@ static void frontend_changed(struct xenbus_device *dev,
601 break; 601 break;
602 602
603 case XenbusStateClosing: 603 case XenbusStateClosing:
604 xen_blkif_disconnect(be->blkif);
605 xenbus_switch_state(dev, XenbusStateClosing); 604 xenbus_switch_state(dev, XenbusStateClosing);
606 break; 605 break;
607 606
608 case XenbusStateClosed: 607 case XenbusStateClosed:
608 xen_blkif_disconnect(be->blkif);
609 xenbus_switch_state(dev, XenbusStateClosed); 609 xenbus_switch_state(dev, XenbusStateClosed);
610 if (xenbus_dev_is_online(dev)) 610 if (xenbus_dev_is_online(dev))
611 break; 611 break;
612 /* fall through if not online */ 612 /* fall through if not online */
613 case XenbusStateUnknown: 613 case XenbusStateUnknown:
614 /* implies blkif_disconnect() via blkback_remove() */ 614 /* implies xen_blkif_disconnect() via xen_blkbk_remove() */
615 device_unregister(&dev->dev); 615 device_unregister(&dev->dev);
616 break; 616 break;
617 617
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b536a9cef917..9ea8c2576c70 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -123,8 +123,8 @@ static DEFINE_SPINLOCK(minor_lock);
123#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED)) 123#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
124#define EMULATED_HD_DISK_MINOR_OFFSET (0) 124#define EMULATED_HD_DISK_MINOR_OFFSET (0)
125#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256) 125#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
126#define EMULATED_SD_DISK_MINOR_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET + (4 * 16)) 126#define EMULATED_SD_DISK_MINOR_OFFSET (0)
127#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_HD_DISK_NAME_OFFSET + 4) 127#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)
128 128
129#define DEV_NAME "xvd" /* name in /dev */ 129#define DEV_NAME "xvd" /* name in /dev */
130 130
@@ -529,7 +529,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
529 minor = BLKIF_MINOR_EXT(info->vdevice); 529 minor = BLKIF_MINOR_EXT(info->vdevice);
530 nr_parts = PARTS_PER_EXT_DISK; 530 nr_parts = PARTS_PER_EXT_DISK;
531 offset = minor / nr_parts; 531 offset = minor / nr_parts;
532 if (xen_hvm_domain() && offset <= EMULATED_HD_DISK_NAME_OFFSET + 4) 532 if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
533 printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with " 533 printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
534 "emulated IDE disks,\n\t choose an xvd device name" 534 "emulated IDE disks,\n\t choose an xvd device name"
535 "from xvde on\n", info->vdevice); 535 "from xvde on\n", info->vdevice);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a5854735bb2e..db7cb8111fbe 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -63,6 +63,7 @@ static struct usb_device_id ath3k_table[] = {
63 /* Atheros AR3011 with sflash firmware*/ 63 /* Atheros AR3011 with sflash firmware*/
64 { USB_DEVICE(0x0CF3, 0x3002) }, 64 { USB_DEVICE(0x0CF3, 0x3002) },
65 { USB_DEVICE(0x13d3, 0x3304) }, 65 { USB_DEVICE(0x13d3, 0x3304) },
66 { USB_DEVICE(0x0930, 0x0215) },
66 67
67 /* Atheros AR9285 Malbec with sflash firmware */ 68 /* Atheros AR9285 Malbec with sflash firmware */
68 { USB_DEVICE(0x03F0, 0x311D) }, 69 { USB_DEVICE(0x03F0, 0x311D) },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 91d13a9e8c65..9cbac6b445e1 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -72,9 +72,15 @@ static struct usb_device_id btusb_table[] = {
72 /* Apple MacBookAir3,1, MacBookAir3,2 */ 72 /* Apple MacBookAir3,1, MacBookAir3,2 */
73 { USB_DEVICE(0x05ac, 0x821b) }, 73 { USB_DEVICE(0x05ac, 0x821b) },
74 74
75 /* Apple MacBookAir4,1 */
76 { USB_DEVICE(0x05ac, 0x821f) },
77
75 /* Apple MacBookPro8,2 */ 78 /* Apple MacBookPro8,2 */
76 { USB_DEVICE(0x05ac, 0x821a) }, 79 { USB_DEVICE(0x05ac, 0x821a) },
77 80
81 /* Apple MacMini5,1 */
82 { USB_DEVICE(0x05ac, 0x8281) },
83
78 /* AVM BlueFRITZ! USB v2.0 */ 84 /* AVM BlueFRITZ! USB v2.0 */
79 { USB_DEVICE(0x057c, 0x3800) }, 85 { USB_DEVICE(0x057c, 0x3800) },
80 86
@@ -106,6 +112,7 @@ static struct usb_device_id blacklist_table[] = {
106 /* Atheros 3011 with sflash firmware */ 112 /* Atheros 3011 with sflash firmware */
107 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, 113 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
108 { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, 114 { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
115 { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
109 116
110 /* Atheros AR9285 Malbec with sflash firmware */ 117 /* Atheros AR9285 Malbec with sflash firmware */
111 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, 118 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
@@ -256,7 +263,9 @@ static void btusb_intr_complete(struct urb *urb)
256 263
257 err = usb_submit_urb(urb, GFP_ATOMIC); 264 err = usb_submit_urb(urb, GFP_ATOMIC);
258 if (err < 0) { 265 if (err < 0) {
259 if (err != -EPERM) 266 /* -EPERM: urb is being killed;
267 * -ENODEV: device got disconnected */
268 if (err != -EPERM && err != -ENODEV)
260 BT_ERR("%s urb %p failed to resubmit (%d)", 269 BT_ERR("%s urb %p failed to resubmit (%d)",
261 hdev->name, urb, -err); 270 hdev->name, urb, -err);
262 usb_unanchor_urb(urb); 271 usb_unanchor_urb(urb);
@@ -341,7 +350,9 @@ static void btusb_bulk_complete(struct urb *urb)
341 350
342 err = usb_submit_urb(urb, GFP_ATOMIC); 351 err = usb_submit_urb(urb, GFP_ATOMIC);
343 if (err < 0) { 352 if (err < 0) {
344 if (err != -EPERM) 353 /* -EPERM: urb is being killed;
354 * -ENODEV: device got disconnected */
355 if (err != -EPERM && err != -ENODEV)
345 BT_ERR("%s urb %p failed to resubmit (%d)", 356 BT_ERR("%s urb %p failed to resubmit (%d)",
346 hdev->name, urb, -err); 357 hdev->name, urb, -err);
347 usb_unanchor_urb(urb); 358 usb_unanchor_urb(urb);
@@ -431,7 +442,9 @@ static void btusb_isoc_complete(struct urb *urb)
431 442
432 err = usb_submit_urb(urb, GFP_ATOMIC); 443 err = usb_submit_urb(urb, GFP_ATOMIC);
433 if (err < 0) { 444 if (err < 0) {
434 if (err != -EPERM) 445 /* -EPERM: urb is being killed;
446 * -ENODEV: device got disconnected */
447 if (err != -EPERM && err != -ENODEV)
435 BT_ERR("%s urb %p failed to resubmit (%d)", 448 BT_ERR("%s urb %p failed to resubmit (%d)",
436 hdev->name, urb, -err); 449 hdev->name, urb, -err);
437 usb_unanchor_urb(urb); 450 usb_unanchor_urb(urb);
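Editor's note: the btusb hunks extend the set of "expected" errors when resubmitting RX URBs: -EPERM (the URB is being killed) and -ENODEV (the device is gone) are normal during teardown and should not be logged as failures. A sketch of the completion-handler idiom, hypothetical names:

    #include <linux/usb.h>
    #include <linux/printk.h>

    static void my_rx_complete(struct urb *urb)
    {
            int err;

            if (urb->status)
                    return;                 /* shut down or device gone */

            /* ... consume urb->transfer_buffer / urb->actual_length ... */

            err = usb_submit_urb(urb, GFP_ATOMIC);
            if (err < 0) {
                    /* -EPERM: urb is being killed;
                     * -ENODEV: device got disconnected */
                    if (err != -EPERM && err != -ENODEV)
                            pr_err("urb %p failed to resubmit (%d)\n",
                                   urb, -err);
            }
    }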
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 65d27aff553a..04d353f58d71 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -125,6 +125,13 @@ static long st_receive(void *priv_data, struct sk_buff *skb)
125/* protocol structure registered with shared transport */ 125/* protocol structure registered with shared transport */
126static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = { 126static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
127 { 127 {
128 .chnl_id = HCI_EVENT_PKT, /* HCI Events */
129 .hdr_len = sizeof(struct hci_event_hdr),
130 .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
131 .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
132 .reserve = 8,
133 },
134 {
128 .chnl_id = HCI_ACLDATA_PKT, /* ACL */ 135 .chnl_id = HCI_ACLDATA_PKT, /* ACL */
129 .hdr_len = sizeof(struct hci_acl_hdr), 136 .hdr_len = sizeof(struct hci_acl_hdr),
130 .offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen), 137 .offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen),
@@ -138,13 +145,6 @@ static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
138 .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */ 145 .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
139 .reserve = 8, 146 .reserve = 8,
140 }, 147 },
141 {
142 .chnl_id = HCI_EVENT_PKT, /* HCI Events */
143 .hdr_len = sizeof(struct hci_event_hdr),
144 .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
145 .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
146 .reserve = 8,
147 },
148}; 148};
149 149
150/* Called from HCI core to initialize the device */ 150/* Called from HCI core to initialize the device */
@@ -240,7 +240,7 @@ static int ti_st_close(struct hci_dev *hdev)
240 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 240 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
241 return 0; 241 return 0;
242 242
243 for (i = 0; i < MAX_BT_CHNL_IDS; i++) { 243 for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
244 err = st_unregister(&ti_st_proto[i]); 244 err = st_unregister(&ti_st_proto[i]);
245 if (err) 245 if (err)
246 BT_ERR("st_unregister(%d) failed with error %d", 246 BT_ERR("st_unregister(%d) failed with error %d",
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 75fb965b8f72..f997c27d79e2 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1929,11 +1929,17 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
1929 goto out; 1929 goto out;
1930 1930
1931 s->manufact.len = buf[0] << 8 | buf[1]; 1931 s->manufact.len = buf[0] << 8 | buf[1];
1932 if (s->manufact.len < 0 || s->manufact.len > 2048) { 1932 if (s->manufact.len < 0) {
1933 cdinfo(CD_WARNING, "Received invalid manufacture info length" 1933 cdinfo(CD_WARNING, "Received invalid manufacture info length"
1934 " (%d)\n", s->manufact.len); 1934 " (%d)\n", s->manufact.len);
1935 ret = -EIO; 1935 ret = -EIO;
1936 } else { 1936 } else {
1937 if (s->manufact.len > 2048) {
1938 cdinfo(CD_WARNING, "Received invalid manufacture info "
1939 "length (%d): truncating to 2048\n",
1940 s->manufact.len);
1941 s->manufact.len = 2048;
1942 }
1937 memcpy(s->manufact.value, &buf[4], s->manufact.len); 1943 memcpy(s->manufact.value, &buf[4], s->manufact.len);
1938 } 1944 }
1939 1945
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
index b6f8a65c9960..8eca55deb3a3 100644
--- a/drivers/char/msm_smd_pkt.c
+++ b/drivers/char/msm_smd_pkt.c
@@ -379,9 +379,8 @@ static int __init smd_pkt_init(void)
379 for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { 379 for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
380 smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev), 380 smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev),
381 GFP_KERNEL); 381 GFP_KERNEL);
382 if (IS_ERR(smd_pkt_devp[i])) { 382 if (!smd_pkt_devp[i]) {
383 r = PTR_ERR(smd_pkt_devp[i]); 383 pr_err("kmalloc() failed\n");
384 pr_err("kmalloc() failed %d\n", r);
385 goto clean_cdevs; 384 goto clean_cdevs;
386 } 385 }
387 386
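Editor's note: the msm_smd_pkt fix is a reminder that kzalloc()/kmalloc() report failure with NULL, never with an ERR_PTR(), so IS_ERR()/PTR_ERR() checks on the result silently pass on allocation failure. The correct shape, with a hypothetical structure:

    #include <linux/slab.h>
    #include <linux/errno.h>

    struct foo {                            /* hypothetical */
            int value;
    };

    static int foo_create(struct foo **out)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)                         /* NULL on failure, not an ERR_PTR */
                    return -ENOMEM;

            *out = f;
            return 0;
    }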
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index f6595aba4f0f..fa567f1158c2 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -43,6 +43,7 @@ config TCG_NSC
43 43
44config TCG_ATMEL 44config TCG_ATMEL
45 tristate "Atmel TPM Interface" 45 tristate "Atmel TPM Interface"
46 depends on PPC64 || HAS_IOPORT
46 ---help--- 47 ---help---
47 If you have a TPM security chip from Atmel say Yes and it 48 If you have a TPM security chip from Atmel say Yes and it
48 will be accessible from within Linux. To compile this driver 49 will be accessible from within Linux. To compile this driver
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index caf8012ef47c..9ca5c021d0b6 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -383,6 +383,9 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
383 u32 count, ordinal; 383 u32 count, ordinal;
384 unsigned long stop; 384 unsigned long stop;
385 385
386 if (bufsiz > TPM_BUFSIZE)
387 bufsiz = TPM_BUFSIZE;
388
386 count = be32_to_cpu(*((__be32 *) (buf + 2))); 389 count = be32_to_cpu(*((__be32 *) (buf + 2)));
387 ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); 390 ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
388 if (count == 0) 391 if (count == 0)
@@ -1102,6 +1105,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
1102{ 1105{
1103 struct tpm_chip *chip = file->private_data; 1106 struct tpm_chip *chip = file->private_data;
1104 ssize_t ret_size; 1107 ssize_t ret_size;
1108 int rc;
1105 1109
1106 del_singleshot_timer_sync(&chip->user_read_timer); 1110 del_singleshot_timer_sync(&chip->user_read_timer);
1107 flush_work_sync(&chip->work); 1111 flush_work_sync(&chip->work);
@@ -1112,8 +1116,11 @@ ssize_t tpm_read(struct file *file, char __user *buf,
1112 ret_size = size; 1116 ret_size = size;
1113 1117
1114 mutex_lock(&chip->buffer_mutex); 1118 mutex_lock(&chip->buffer_mutex);
1115 if (copy_to_user(buf, chip->data_buffer, ret_size)) 1119 rc = copy_to_user(buf, chip->data_buffer, ret_size);
1120 memset(chip->data_buffer, 0, ret_size);
1121 if (rc)
1116 ret_size = -EFAULT; 1122 ret_size = -EFAULT;
1123
1117 mutex_unlock(&chip->buffer_mutex); 1124 mutex_unlock(&chip->buffer_mutex);
1118 } 1125 }
1119 1126
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 82facc9104c7..4d2464871ada 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -396,8 +396,6 @@ static void __exit cleanup_nsc(void)
396 if (pdev) { 396 if (pdev) {
397 tpm_nsc_remove(&pdev->dev); 397 tpm_nsc_remove(&pdev->dev);
398 platform_device_unregister(pdev); 398 platform_device_unregister(pdev);
399 kfree(pdev);
400 pdev = NULL;
401 } 399 }
402 400
403 platform_driver_unregister(&nsc_drv); 401 platform_driver_unregister(&nsc_drv);
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index dc7c033ef587..32a77becc098 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -26,6 +26,7 @@
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/irq.h> 27#include <linux/irq.h>
28#include <linux/err.h> 28#include <linux/err.h>
29#include <linux/delay.h>
29#include <linux/clocksource.h> 30#include <linux/clocksource.h>
30#include <linux/clockchips.h> 31#include <linux/clockchips.h>
31#include <linux/sh_timer.h> 32#include <linux/sh_timer.h>
@@ -150,13 +151,13 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
150 151
151static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) 152static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
152{ 153{
153 int ret; 154 int k, ret;
154 155
155 /* enable clock */ 156 /* enable clock */
156 ret = clk_enable(p->clk); 157 ret = clk_enable(p->clk);
157 if (ret) { 158 if (ret) {
158 dev_err(&p->pdev->dev, "cannot enable clock\n"); 159 dev_err(&p->pdev->dev, "cannot enable clock\n");
159 return ret; 160 goto err0;
160 } 161 }
161 162
162 /* make sure channel is disabled */ 163 /* make sure channel is disabled */
@@ -174,9 +175,38 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
174 sh_cmt_write(p, CMCOR, 0xffffffff); 175 sh_cmt_write(p, CMCOR, 0xffffffff);
175 sh_cmt_write(p, CMCNT, 0); 176 sh_cmt_write(p, CMCNT, 0);
176 177
178 /*
179 * According to the sh73a0 user's manual, as CMCNT can be operated
180 * only by the RCLK (Pseudo 32 KHz), there's one restriction on
181 * modifying CMCNT register; two RCLK cycles are necessary before
182 * this register is either read or any modification of the value
183 * it holds is reflected in the LSI's actual operation.
184 *
185 * While at it, we're supposed to clear out the CMCNT as of this
186 * moment, so make sure it's processed properly here. This will
187 * take RCLKx2 at maximum.
188 */
189 for (k = 0; k < 100; k++) {
190 if (!sh_cmt_read(p, CMCNT))
191 break;
192 udelay(1);
193 }
194
195 if (sh_cmt_read(p, CMCNT)) {
196 dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
197 ret = -ETIMEDOUT;
198 goto err1;
199 }
200
177 /* enable channel */ 201 /* enable channel */
178 sh_cmt_start_stop_ch(p, 1); 202 sh_cmt_start_stop_ch(p, 1);
179 return 0; 203 return 0;
204 err1:
205 /* stop clock */
206 clk_disable(p->clk);
207
208 err0:
209 return ret;
180} 210}
181 211
182static void sh_cmt_disable(struct sh_cmt_priv *p) 212static void sh_cmt_disable(struct sh_cmt_priv *p)
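Editor's note: the sh_cmt change is a standard bounded register poll: write, spin with udelay() until the hardware reflects the write, and fail cleanly with -ETIMEDOUT instead of hanging. The generic shape, with a hypothetical register and bound:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>

    /* Wait (at most ~100 us here) for a register to read back as zero. */
    static int wait_for_clear(void __iomem *reg)
    {
            int k;

            for (k = 0; k < 100; k++) {
                    if (!readl(reg))
                            return 0;
                    udelay(1);
            }

            return readl(reg) ? -ETIMEDOUT : 0;
    }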
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 7b0603eb0129..cdc02ac8f41a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -261,6 +261,9 @@ static int pcc_get_offset(int cpu)
261 pr = per_cpu(processors, cpu); 261 pr = per_cpu(processors, cpu);
262 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); 262 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
263 263
264 if (!pr)
265 return -ENODEV;
266
264 status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer); 267 status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
265 if (ACPI_FAILURE(status)) 268 if (ACPI_FAILURE(status))
266 return -ENODEV; 269 return -ENODEV;
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 196a7378d332..be21e3f138a8 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -80,6 +80,7 @@
80#include <linux/interrupt.h> 80#include <linux/interrupt.h>
81#include <linux/slab.h> 81#include <linux/slab.h>
82#include <linux/delay.h> 82#include <linux/delay.h>
83#include <linux/dma-mapping.h>
83#include <linux/dmapool.h> 84#include <linux/dmapool.h>
84#include <linux/dmaengine.h> 85#include <linux/dmaengine.h>
85#include <linux/amba/bus.h> 86#include <linux/amba/bus.h>
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index cd3a7c726bf8..467e4dcb20a0 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -174,8 +174,10 @@ struct d40_base;
174 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a 174 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
175 * transfer and call client callback. 175 * transfer and call client callback.
176 * @client: Cliented owned descriptor list. 176 * @client: Cliented owned descriptor list.
177 * @pending_queue: Submitted jobs, to be issued by issue_pending()
177 * @active: Active descriptor. 178 * @active: Active descriptor.
178 * @queue: Queued jobs. 179 * @queue: Queued jobs.
180 * @prepare_queue: Prepared jobs.
179 * @dma_cfg: The client configuration of this dma channel. 181 * @dma_cfg: The client configuration of this dma channel.
180 * @configured: whether the dma_cfg configuration is valid 182 * @configured: whether the dma_cfg configuration is valid
181 * @base: Pointer to the device instance struct. 183 * @base: Pointer to the device instance struct.
@@ -203,6 +205,7 @@ struct d40_chan {
203 struct list_head pending_queue; 205 struct list_head pending_queue;
204 struct list_head active; 206 struct list_head active;
205 struct list_head queue; 207 struct list_head queue;
208 struct list_head prepare_queue;
206 struct stedma40_chan_cfg dma_cfg; 209 struct stedma40_chan_cfg dma_cfg;
207 bool configured; 210 bool configured;
208 struct d40_base *base; 211 struct d40_base *base;
@@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
477 480
478 list_for_each_entry_safe(d, _d, &d40c->client, node) 481 list_for_each_entry_safe(d, _d, &d40c->client, node)
479 if (async_tx_test_ack(&d->txd)) { 482 if (async_tx_test_ack(&d->txd)) {
480 d40_pool_lli_free(d40c, d);
481 d40_desc_remove(d); 483 d40_desc_remove(d);
482 desc = d; 484 desc = d;
483 memset(desc, 0, sizeof(*desc)); 485 memset(desc, 0, sizeof(*desc));
@@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
644 return d; 646 return d;
645} 647}
646 648
649/* remove desc from current queue and add it to the pending_queue */
647static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) 650static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
648{ 651{
652 d40_desc_remove(desc);
653 desc->is_in_client_list = false;
649 list_add_tail(&desc->node, &d40c->pending_queue); 654 list_add_tail(&desc->node, &d40c->pending_queue);
650} 655}
651 656
@@ -803,6 +808,7 @@ done:
803static void d40_term_all(struct d40_chan *d40c) 808static void d40_term_all(struct d40_chan *d40c)
804{ 809{
805 struct d40_desc *d40d; 810 struct d40_desc *d40d;
811 struct d40_desc *_d;
806 812
807 /* Release active descriptors */ 813 /* Release active descriptors */
808 while ((d40d = d40_first_active_get(d40c))) { 814 while ((d40d = d40_first_active_get(d40c))) {
@@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c)
822 d40_desc_free(d40c, d40d); 828 d40_desc_free(d40c, d40d);
823 } 829 }
824 830
831 /* Release client owned descriptors */
832 if (!list_empty(&d40c->client))
833 list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
834 d40_desc_remove(d40d);
835 d40_desc_free(d40c, d40d);
836 }
837
838 /* Release descriptors in prepare queue */
839 if (!list_empty(&d40c->prepare_queue))
840 list_for_each_entry_safe(d40d, _d,
841 &d40c->prepare_queue, node) {
842 d40_desc_remove(d40d);
843 d40_desc_free(d40c, d40d);
844 }
845
825 d40c->pending_tx = 0; 846 d40c->pending_tx = 0;
826 d40c->busy = false; 847 d40c->busy = false;
827} 848}
@@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data)
1208 1229
1209 if (!d40d->cyclic) { 1230 if (!d40d->cyclic) {
1210 if (async_tx_test_ack(&d40d->txd)) { 1231 if (async_tx_test_ack(&d40d->txd)) {
1211 d40_pool_lli_free(d40c, d40d);
1212 d40_desc_remove(d40d); 1232 d40_desc_remove(d40d);
1213 d40_desc_free(d40c, d40d); 1233 d40_desc_free(d40c, d40d);
1214 } else { 1234 } else {
@@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
1595 u32 event; 1615 u32 event;
1596 struct d40_phy_res *phy = d40c->phy_chan; 1616 struct d40_phy_res *phy = d40c->phy_chan;
1597 bool is_src; 1617 bool is_src;
1598 struct d40_desc *d;
1599 struct d40_desc *_d;
1600
1601 1618
1602 /* Terminate all queued and active transfers */ 1619 /* Terminate all queued and active transfers */
1603 d40_term_all(d40c); 1620 d40_term_all(d40c);
1604 1621
1605 /* Release client owned descriptors */
1606 if (!list_empty(&d40c->client))
1607 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1608 d40_pool_lli_free(d40c, d);
1609 d40_desc_remove(d);
1610 d40_desc_free(d40c, d);
1611 }
1612
1613 if (phy == NULL) { 1622 if (phy == NULL) {
1614 chan_err(d40c, "phy == null\n"); 1623 chan_err(d40c, "phy == null\n");
1615 return -EINVAL; 1624 return -EINVAL;
@@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
1911 goto err; 1920 goto err;
1912 } 1921 }
1913 1922
1923 /*
1924 * add descriptor to the prepare queue in order to be able
1925 * to free them later in terminate_all
1926 */
1927 list_add_tail(&desc->node, &chan->prepare_queue);
1928
1914 spin_unlock_irqrestore(&chan->lock, flags); 1929 spin_unlock_irqrestore(&chan->lock, flags);
1915 1930
1916 return &desc->txd; 1931 return &desc->txd;
@@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2400 INIT_LIST_HEAD(&d40c->queue); 2415 INIT_LIST_HEAD(&d40c->queue);
2401 INIT_LIST_HEAD(&d40c->pending_queue); 2416 INIT_LIST_HEAD(&d40c->pending_queue);
2402 INIT_LIST_HEAD(&d40c->client); 2417 INIT_LIST_HEAD(&d40c->client);
2418 INIT_LIST_HEAD(&d40c->prepare_queue);
2403 2419
2404 tasklet_init(&d40c->tasklet, dma_tasklet, 2420 tasklet_init(&d40c->tasklet, dma_tasklet,
2405 (unsigned long) d40c); 2421 (unsigned long) d40c);
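Editor's note: the ste_dma40 rework keeps prepared descriptors on their own list so terminate_all can free everything it finds; freeing while walking requires the _safe iterator, which caches the next node before the current one is released. Sketch with a hypothetical descriptor type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct my_desc {                        /* hypothetical descriptor */
            struct list_head node;
    };

    static void release_all(struct list_head *head)
    {
            struct my_desc *d, *tmp;

            /* list_for_each_entry() would touch freed memory here. */
            list_for_each_entry_safe(d, tmp, head, node) {
                    list_del(&d->node);
                    kfree(d);
            }
    }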
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 04f1e7ce02b1..f6cf448d69b4 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
1670 char *type, *optype, *err, *msg; 1670 char *type, *optype, *err, *msg;
1671 unsigned long error = m->status & 0x1ff0000l; 1671 unsigned long error = m->status & 0x1ff0000l;
1672 u32 optypenum = (m->status >> 4) & 0x07; 1672 u32 optypenum = (m->status >> 4) & 0x07;
1673 u32 core_err_cnt = (m->status >> 38) && 0x7fff; 1673 u32 core_err_cnt = (m->status >> 38) & 0x7fff;
1674 u32 dimm = (m->misc >> 16) & 0x3; 1674 u32 dimm = (m->misc >> 16) & 0x3;
1675 u32 channel = (m->misc >> 18) & 0x3; 1675 u32 channel = (m->misc >> 18) & 0x3;
1676 u32 syndrome = m->misc >> 32; 1676 u32 syndrome = m->misc >> 32;
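Editor's note: the i7core_edac fix swaps a logical '&&' for the bitwise '&' it was meant to be; with '&&' the expression collapses to 0 or 1 instead of extracting the 15-bit field. A runnable illustration with a made-up status value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t status = 0x0123456789abcdefULL;   /* made-up MCE status */

            uint32_t wrong = (status >> 38) && 0x7fff; /* 0 or 1 */
            uint32_t right = (status >> 38) & 0x7fff;  /* the actual field */

            printf("wrong=%u right=%u\n", wrong, right);
            return 0;
    }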
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index e6ad3bb6c1a6..4799393247c8 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -216,15 +216,33 @@ struct inbound_phy_packet_event {
216 struct fw_cdev_event_phy_packet phy_packet; 216 struct fw_cdev_event_phy_packet phy_packet;
217}; 217};
218 218
219static inline void __user *u64_to_uptr(__u64 value) 219#ifdef CONFIG_COMPAT
220static void __user *u64_to_uptr(u64 value)
221{
222 if (is_compat_task())
223 return compat_ptr(value);
224 else
225 return (void __user *)(unsigned long)value;
226}
227
228static u64 uptr_to_u64(void __user *ptr)
229{
230 if (is_compat_task())
231 return ptr_to_compat(ptr);
232 else
233 return (u64)(unsigned long)ptr;
234}
235#else
236static inline void __user *u64_to_uptr(u64 value)
220{ 237{
221 return (void __user *)(unsigned long)value; 238 return (void __user *)(unsigned long)value;
222} 239}
223 240
224static inline __u64 uptr_to_u64(void __user *ptr) 241static inline u64 uptr_to_u64(void __user *ptr)
225{ 242{
226 return (__u64)(unsigned long)ptr; 243 return (u64)(unsigned long)ptr;
227} 244}
245#endif /* CONFIG_COMPAT */
228 246
229static int fw_device_op_open(struct inode *inode, struct file *file) 247static int fw_device_op_open(struct inode *inode, struct file *file)
230{ 248{
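Editor's note: the core-cdev change handles 64-bit kernels with 32-bit userspace: pointer-sized values carried in the u64 ABI fields must go through compat_ptr()/ptr_to_compat() for compat tasks, since a plain cast can keep garbage in the upper bits. The shape of the helper, assuming CONFIG_COMPAT semantics as in the patch:

    #include <linux/compat.h>
    #include <linux/types.h>

    static void __user *abi_u64_to_uptr(u64 value)
    {
    #ifdef CONFIG_COMPAT
            if (is_compat_task())
                    return compat_ptr(value);
    #endif
            return (void __user *)(unsigned long)value;
    }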
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 8ba7f7928f1f..f3b890da1e87 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -455,15 +455,20 @@ static struct device_attribute fw_device_attributes[] = {
455static int read_rom(struct fw_device *device, 455static int read_rom(struct fw_device *device,
456 int generation, int index, u32 *data) 456 int generation, int index, u32 *data)
457{ 457{
458 int rcode; 458 u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
459 int i, rcode;
459 460
460 /* device->node_id, accessed below, must not be older than generation */ 461 /* device->node_id, accessed below, must not be older than generation */
461 smp_rmb(); 462 smp_rmb();
462 463
463 rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST, 464 for (i = 10; i < 100; i += 10) {
464 device->node_id, generation, device->max_speed, 465 rcode = fw_run_transaction(device->card,
465 (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4, 466 TCODE_READ_QUADLET_REQUEST, device->node_id,
466 data, 4); 467 generation, device->max_speed, offset, data, 4);
468 if (rcode != RCODE_BUSY)
469 break;
470 msleep(i);
471 }
467 be32_to_cpus(data); 472 be32_to_cpus(data);
468 473
469 return rcode; 474 return rcode;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index bcf792fac442..fd7170a9ad2c 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -290,6 +290,9 @@ static const struct {
290 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, 290 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
291 QUIRK_CYCLE_TIMER}, 291 QUIRK_CYCLE_TIMER},
292 292
293 {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
294 QUIRK_NO_MSI},
295
293 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, 296 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
294 QUIRK_CYCLE_TIMER}, 297 QUIRK_CYCLE_TIMER},
295 298
@@ -2179,8 +2182,13 @@ static int ohci_enable(struct fw_card *card,
2179 ohci_driver_name, ohci)) { 2182 ohci_driver_name, ohci)) {
2180 fw_error("Failed to allocate interrupt %d.\n", dev->irq); 2183 fw_error("Failed to allocate interrupt %d.\n", dev->irq);
2181 pci_disable_msi(dev); 2184 pci_disable_msi(dev);
2182 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2185
2183 ohci->config_rom, ohci->config_rom_bus); 2186 if (config_rom) {
2187 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2188 ohci->next_config_rom,
2189 ohci->next_config_rom_bus);
2190 ohci->next_config_rom = NULL;
2191 }
2184 return -EIO; 2192 return -EIO;
2185 } 2193 }
2186 2194
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 41841a3e3f99..17cef864506a 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1198,6 +1198,10 @@ static int sbp2_remove(struct device *dev)
1198{ 1198{
1199 struct fw_unit *unit = fw_unit(dev); 1199 struct fw_unit *unit = fw_unit(dev);
1200 struct sbp2_target *tgt = dev_get_drvdata(&unit->device); 1200 struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
1201 struct sbp2_logical_unit *lu;
1202
1203 list_for_each_entry(lu, &tgt->lu_list, link)
1204 cancel_delayed_work_sync(&lu->work);
1201 1205
1202 sbp2_target_put(tgt); 1206 sbp2_target_put(tgt);
1203 return 0; 1207 return 0;
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 68810fd1a59d..aa83de9db1b9 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -420,7 +420,7 @@ static efi_status_t gsmi_get_next_variable(unsigned long *name_size,
420 420
421static efi_status_t gsmi_set_variable(efi_char16_t *name, 421static efi_status_t gsmi_set_variable(efi_char16_t *name,
422 efi_guid_t *vendor, 422 efi_guid_t *vendor,
423 unsigned long attr, 423 u32 attr,
424 unsigned long data_size, 424 unsigned long data_size,
425 void *data) 425 void *data)
426{ 426{
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d539efd96d4b..4caa3d37bbde 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -178,6 +178,15 @@ config GPIO_SCH
178 The Intel Tunnel Creek processor has 5 GPIOs powered by the 178 The Intel Tunnel Creek processor has 5 GPIOs powered by the
179 core power rail and 9 from suspend power supply. 179 core power rail and 9 from suspend power supply.
180 180
181config GPIO_U300
182 bool "ST-Ericsson U300 COH 901 335/571 GPIO"
183 depends on GPIOLIB && ARCH_U300
184 help
185 Say yes here to support GPIO interface on ST-Ericsson U300.
186 The names of the two IP block variants supported are
187 COH 901 335 and COH 901 571/3. They contain 3, 5 or 7
188 ports of 8 GPIO pins each.
189
181config GPIO_VX855 190config GPIO_VX855
182 tristate "VIA VX855/VX875 GPIO" 191 tristate "VIA VX855/VX875 GPIO"
183 depends on MFD_SUPPORT && PCI 192 depends on MFD_SUPPORT && PCI
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 9588948c96f0..19c5d27b6d2e 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -14,11 +14,14 @@ obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
14obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o 14obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
15obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o 15obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
16obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o 16obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
17obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
17obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o 18obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
18obj-$(CONFIG_GPIO_EXYNOS4) += gpio-exynos4.o 19obj-$(CONFIG_GPIO_EXYNOS4) += gpio-exynos4.o
19obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o 20obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
20obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o 21obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
22obj-$(CONFIG_MACH_KS8695) += gpio-ks8695.o
21obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o 23obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
24obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
22obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o 25obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
23obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o 26obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
24obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o 27obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
@@ -37,18 +40,20 @@ obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
37obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o 40obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
38obj-$(CONFIG_GPIO_PCH) += gpio-pch.o 41obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
39obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o 42obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
43obj-$(CONFIG_PLAT_PXA) += gpio-pxa.o
40obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o 44obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
41 45
42obj-$(CONFIG_GPIO_PLAT_SAMSUNG) += gpio-plat-samsung.o 46obj-$(CONFIG_GPIO_PLAT_SAMSUNG) += gpio-plat-samsung.o
43obj-$(CONFIG_GPIO_S5PC100) += gpio-s5pc100.o 47obj-$(CONFIG_GPIO_S5PC100) += gpio-s5pc100.o
44obj-$(CONFIG_GPIO_S5PV210) += gpio-s5pv210.o 48obj-$(CONFIG_GPIO_S5PV210) += gpio-s5pv210.o
45 49obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
46obj-$(CONFIG_GPIO_SCH) += gpio-sch.o 50obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
47obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o 51obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
48obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o 52obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
49obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o 53obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
50obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o 54obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
51obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o 55obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
56obj-$(CONFIG_ARCH_DAVINCI_TNETV107X) += gpio-tnetv107x.o
52obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o 57obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
53obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o 58obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
54obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o 59obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
new file mode 100644
index 000000000000..df0d59570a84
--- /dev/null
+++ b/drivers/gpio/gpio-davinci.c
@@ -0,0 +1,455 @@
1/*
2 * TI DaVinci GPIO Support
3 *
4 * Copyright (c) 2006-2007 David Brownell
5 * Copyright (c) 2007, MontaVista Software, Inc. <source@mvista.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12#include <linux/gpio.h>
13#include <linux/errno.h>
14#include <linux/kernel.h>
15#include <linux/clk.h>
16#include <linux/err.h>
17#include <linux/io.h>
18
19#include <asm/mach/irq.h>
20
21struct davinci_gpio_regs {
22 u32 dir;
23 u32 out_data;
24 u32 set_data;
25 u32 clr_data;
26 u32 in_data;
27 u32 set_rising;
28 u32 clr_rising;
29 u32 set_falling;
30 u32 clr_falling;
31 u32 intstat;
32};
33
34#define chip2controller(chip) \
35 container_of(chip, struct davinci_gpio_controller, chip)
36
37static struct davinci_gpio_controller chips[DIV_ROUND_UP(DAVINCI_N_GPIO, 32)];
38static void __iomem *gpio_base;
39
40static struct davinci_gpio_regs __iomem __init *gpio2regs(unsigned gpio)
41{
42 void __iomem *ptr;
43
44 if (gpio < 32 * 1)
45 ptr = gpio_base + 0x10;
46 else if (gpio < 32 * 2)
47 ptr = gpio_base + 0x38;
48 else if (gpio < 32 * 3)
49 ptr = gpio_base + 0x60;
50 else if (gpio < 32 * 4)
51 ptr = gpio_base + 0x88;
52 else if (gpio < 32 * 5)
53 ptr = gpio_base + 0xb0;
54 else
55 ptr = NULL;
56 return ptr;
57}
58
59static inline struct davinci_gpio_regs __iomem *irq2regs(int irq)
60{
61 struct davinci_gpio_regs __iomem *g;
62
63 g = (__force struct davinci_gpio_regs __iomem *)irq_get_chip_data(irq);
64
65 return g;
66}
67
68static int __init davinci_gpio_irq_setup(void);
69
70/*--------------------------------------------------------------------------*/
71
72/* board setup code *MUST* setup pinmux and enable the GPIO clock. */
73static inline int __davinci_direction(struct gpio_chip *chip,
74 unsigned offset, bool out, int value)
75{
76 struct davinci_gpio_controller *d = chip2controller(chip);
77 struct davinci_gpio_regs __iomem *g = d->regs;
78 unsigned long flags;
79 u32 temp;
80 u32 mask = 1 << offset;
81
82 spin_lock_irqsave(&d->lock, flags);
83 temp = __raw_readl(&g->dir);
84 if (out) {
85 temp &= ~mask;
86 __raw_writel(mask, value ? &g->set_data : &g->clr_data);
87 } else {
88 temp |= mask;
89 }
90 __raw_writel(temp, &g->dir);
91 spin_unlock_irqrestore(&d->lock, flags);
92
93 return 0;
94}
95
96static int davinci_direction_in(struct gpio_chip *chip, unsigned offset)
97{
98 return __davinci_direction(chip, offset, false, 0);
99}
100
101static int
102davinci_direction_out(struct gpio_chip *chip, unsigned offset, int value)
103{
104 return __davinci_direction(chip, offset, true, value);
105}
106
107/*
108 * Read the pin's value (works even if it's set up as output);
109 * returns zero/nonzero.
110 *
111 * Note that changes are synched to the GPIO clock, so reading values back
112 * right after you've set them may give old values.
113 */
114static int davinci_gpio_get(struct gpio_chip *chip, unsigned offset)
115{
116 struct davinci_gpio_controller *d = chip2controller(chip);
117 struct davinci_gpio_regs __iomem *g = d->regs;
118
119 return (1 << offset) & __raw_readl(&g->in_data);
120}
121
122/*
123 * Assuming the pin is muxed as a gpio output, set its output value.
124 */
125static void
126davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
127{
128 struct davinci_gpio_controller *d = chip2controller(chip);
129 struct davinci_gpio_regs __iomem *g = d->regs;
130
131 __raw_writel((1 << offset), value ? &g->set_data : &g->clr_data);
132}
133
134static int __init davinci_gpio_setup(void)
135{
136 int i, base;
137 unsigned ngpio;
138 struct davinci_soc_info *soc_info = &davinci_soc_info;
139 struct davinci_gpio_regs *regs;
140
141 if (soc_info->gpio_type != GPIO_TYPE_DAVINCI)
142 return 0;
143
144 /*
145 * The gpio banks conceptually expose a segmented bitmap,
146 * and "ngpio" is one more than the largest zero-based
147 * bit index that's valid.
148 */
149 ngpio = soc_info->gpio_num;
150 if (ngpio == 0) {
151 pr_err("GPIO setup: how many GPIOs?\n");
152 return -EINVAL;
153 }
154
155 if (WARN_ON(DAVINCI_N_GPIO < ngpio))
156 ngpio = DAVINCI_N_GPIO;
157
158 gpio_base = ioremap(soc_info->gpio_base, SZ_4K);
159 if (WARN_ON(!gpio_base))
160 return -ENOMEM;
161
162 for (i = 0, base = 0; base < ngpio; i++, base += 32) {
163 chips[i].chip.label = "DaVinci";
164
165 chips[i].chip.direction_input = davinci_direction_in;
166 chips[i].chip.get = davinci_gpio_get;
167 chips[i].chip.direction_output = davinci_direction_out;
168 chips[i].chip.set = davinci_gpio_set;
169
170 chips[i].chip.base = base;
171 chips[i].chip.ngpio = ngpio - base;
172 if (chips[i].chip.ngpio > 32)
173 chips[i].chip.ngpio = 32;
174
175 spin_lock_init(&chips[i].lock);
176
177 regs = gpio2regs(base);
178 chips[i].regs = regs;
179 chips[i].set_data = &regs->set_data;
180 chips[i].clr_data = &regs->clr_data;
181 chips[i].in_data = &regs->in_data;
182
183 gpiochip_add(&chips[i].chip);
184 }
185
186 soc_info->gpio_ctlrs = chips;
187 soc_info->gpio_ctlrs_num = DIV_ROUND_UP(ngpio, 32);
188
189 davinci_gpio_irq_setup();
190 return 0;
191}
192pure_initcall(davinci_gpio_setup);
193
194/*--------------------------------------------------------------------------*/
195/*
196 * We expect irqs will normally be set up as input pins, but they can also be
197 * used as output pins ... which is convenient for testing.
198 *
199 * NOTE: The first few GPIOs also have direct INTC hookups in addition
200 * to their GPIOBNK0 irq, with a bit less overhead.
201 *
202 * All those INTC hookups (direct, plus several IRQ banks) can also
203 * serve as EDMA event triggers.
204 */
205
206static void gpio_irq_disable(struct irq_data *d)
207{
208 struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
209 u32 mask = (u32) irq_data_get_irq_handler_data(d);
210
211 __raw_writel(mask, &g->clr_falling);
212 __raw_writel(mask, &g->clr_rising);
213}
214
215static void gpio_irq_enable(struct irq_data *d)
216{
217 struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
218 u32 mask = (u32) irq_data_get_irq_handler_data(d);
219 unsigned status = irqd_get_trigger_type(d);
220
221 status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
222 if (!status)
223 status = IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
224
225 if (status & IRQ_TYPE_EDGE_FALLING)
226 __raw_writel(mask, &g->set_falling);
227 if (status & IRQ_TYPE_EDGE_RISING)
228 __raw_writel(mask, &g->set_rising);
229}
230
231static int gpio_irq_type(struct irq_data *d, unsigned trigger)
232{
233 if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
234 return -EINVAL;
235
236 return 0;
237}
238
239static struct irq_chip gpio_irqchip = {
240 .name = "GPIO",
241 .irq_enable = gpio_irq_enable,
242 .irq_disable = gpio_irq_disable,
243 .irq_set_type = gpio_irq_type,
244 .flags = IRQCHIP_SET_TYPE_MASKED,
245};
246
247static void
248gpio_irq_handler(unsigned irq, struct irq_desc *desc)
249{
250 struct davinci_gpio_regs __iomem *g;
251 u32 mask = 0xffff;
252 struct davinci_gpio_controller *d;
253
254 d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc);
255 g = (struct davinci_gpio_regs __iomem *)d->regs;
256
257 /* we only care about one bank */
258 if (irq & 1)
259 mask <<= 16;
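	/*
	 * This handler relies on odd-numbered bank IRQs serving GPIOs 16..31
	 * of their bank (mask shifted to the upper halfword) and even-numbered
	 * bank IRQs serving GPIOs 0..15.
	 */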
260
261 /* temporarily mask (level sensitive) parent IRQ */
262 desc->irq_data.chip->irq_mask(&desc->irq_data);
263 desc->irq_data.chip->irq_ack(&desc->irq_data);
264 while (1) {
265 u32 status;
266 int n;
267 int res;
268
269 /* ack any irqs */
270 status = __raw_readl(&g->intstat) & mask;
271 if (!status)
272 break;
273 __raw_writel(status, &g->intstat);
274
275 /* now demux them to the right lowlevel handler */
276 n = d->irq_base;
277 if (irq & 1) {
278 n += 16;
279 status >>= 16;
280 }
281
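		/*
		 * Walk the pending bits with ffs(); e.g. status == 0x5
		 * dispatches irqs n and n + 2 in two iterations.
		 */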
282 while (status) {
283 res = ffs(status);
284 n += res;
285 generic_handle_irq(n - 1);
286 status >>= res;
287 }
288 }
289 desc->irq_data.chip->irq_unmask(&desc->irq_data);
290 /* now it may re-trigger */
291}
292
293static int gpio_to_irq_banked(struct gpio_chip *chip, unsigned offset)
294{
295 struct davinci_gpio_controller *d = chip2controller(chip);
296
297 if (d->irq_base >= 0)
298 return d->irq_base + offset;
299 else
300 return -ENODEV;
301}
302
303static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
304{
305 struct davinci_soc_info *soc_info = &davinci_soc_info;
306
307 /* NOTE: we assume for now that only irqs in the first gpio_chip
308 * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs).
309 */
310 if (offset < soc_info->gpio_unbanked)
311 return soc_info->gpio_irq + offset;
312 else
313 return -ENODEV;
314}
315
316static int gpio_irq_type_unbanked(struct irq_data *d, unsigned trigger)
317{
318 struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
319 u32 mask = (u32) irq_data_get_irq_handler_data(d);
320
321 if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
322 return -EINVAL;
323
324 __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_FALLING)
325 ? &g->set_falling : &g->clr_falling);
326 __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_RISING)
327 ? &g->set_rising : &g->clr_rising);
328
329 return 0;
330}
331
332/*
333 * NOTE: for suspend/resume, probably best to make a platform_device with
334 * suspend_late/resume_early calls hooking into results of the set_wake()
335 * calls ... so if no gpios are wakeup events the clock can be disabled,
336 * with outputs left at previously set levels, and so that VDD3P3V.IOPWDN0
337 * (dm6446) can be set appropriately for GPIOV33 pins.
338 */
339
340static int __init davinci_gpio_irq_setup(void)
341{
342 unsigned gpio, irq, bank;
343 struct clk *clk;
344 u32 binten = 0;
345 unsigned ngpio, bank_irq;
346 struct davinci_soc_info *soc_info = &davinci_soc_info;
347 struct davinci_gpio_regs __iomem *g;
348
349 ngpio = soc_info->gpio_num;
350
351 bank_irq = soc_info->gpio_irq;
352 if (bank_irq == 0) {
353 printk(KERN_ERR "Don't know first GPIO bank IRQ.\n");
354 return -EINVAL;
355 }
356
357 clk = clk_get(NULL, "gpio");
358 if (IS_ERR(clk)) {
359 printk(KERN_ERR "Error %ld getting gpio clock?\n",
360 PTR_ERR(clk));
361 return PTR_ERR(clk);
362 }
363 clk_enable(clk);
364
365 /* Arrange gpio_to_irq() support, handling either direct IRQs or
366 * banked IRQs. Having GPIOs in the first GPIO bank use direct
367 * IRQs, while the others use banked IRQs, would need some setup
368 * tweaks to recognize hardware which can do that.
369 */
370 for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 32) {
371 chips[bank].chip.to_irq = gpio_to_irq_banked;
372 chips[bank].irq_base = soc_info->gpio_unbanked
373 ? -EINVAL
374 : (soc_info->intc_irq_num + gpio);
375 }
376
377 /*
378 * AINTC can handle direct/unbanked IRQs for GPIOs, with the GPIO
379 * controller only handling trigger modes. We currently assume no
380 * IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs.
381 */
382 if (soc_info->gpio_unbanked) {
383 static struct irq_chip gpio_irqchip_unbanked;
384
385 /* pass "bank 0" GPIO IRQs to AINTC */
386 chips[0].chip.to_irq = gpio_to_irq_unbanked;
387 binten = BIT(0);
388
389 /* AINTC handles mask/unmask; GPIO handles triggering */
390 irq = bank_irq;
391 gpio_irqchip_unbanked = *irq_get_chip(irq);
392 gpio_irqchip_unbanked.name = "GPIO-AINTC";
393 gpio_irqchip_unbanked.irq_set_type = gpio_irq_type_unbanked;
394
395 /* default trigger: both edges */
396 g = gpio2regs(0);
397 __raw_writel(~0, &g->set_falling);
398 __raw_writel(~0, &g->set_rising);
399
400 /* set the direct IRQs up to use that irqchip */
401 for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) {
402 irq_set_chip(irq, &gpio_irqchip_unbanked);
403 irq_set_handler_data(irq, (void *)__gpio_mask(gpio));
404 irq_set_chip_data(irq, (__force void *)g);
405 irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH);
406 }
407
408 goto done;
409 }
410
411 /*
412 * Or, AINTC can handle IRQs for banks of 16 GPIO IRQs, which we
413 * then chain through our own handler.
414 */
415 for (gpio = 0, irq = gpio_to_irq(0), bank = 0;
416 gpio < ngpio;
417 bank++, bank_irq++) {
418 unsigned i;
419
420 /* disabled by default, enabled only as needed */
421 g = gpio2regs(gpio);
422 __raw_writel(~0, &g->clr_falling);
423 __raw_writel(~0, &g->clr_rising);
424
425 /* set up all irqs in this bank */
426 irq_set_chained_handler(bank_irq, gpio_irq_handler);
427
428 /*
429 * Each chip handles 32 gpios, and each irq bank consists of 16
430 * gpio irqs. Pass the irq bank's corresponding controller to
431 * the chained irq handler.
432 */
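		/* e.g. both bank IRQs covering GPIOs 32..47 and 48..63 chain to chips[1] */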
433 irq_set_handler_data(bank_irq, &chips[gpio / 32]);
434
435 for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
436 irq_set_chip(irq, &gpio_irqchip);
437 irq_set_chip_data(irq, (__force void *)g);
438 irq_set_handler_data(irq, (void *)__gpio_mask(gpio));
439 irq_set_handler(irq, handle_simple_irq);
440 set_irq_flags(irq, IRQF_VALID);
441 }
442
443 binten |= BIT(bank);
444 }
445
446done:
447 /* BINTEN -- per-bank interrupt enable. genirq would also let these
448 * bits be set/cleared dynamically.
449 */
450 __raw_writel(binten, gpio_base + 0x08);
451
452 printk(KERN_INFO "DaVinci: %d gpio irqs\n", irq - gpio_to_irq(0));
453
454 return 0;
455}
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 72fb9c665320..7aafbb437339 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -23,6 +23,9 @@
23#include <linux/basic_mmio_gpio.h> 23#include <linux/basic_mmio_gpio.h>
24 24
25#include <mach/hardware.h> 25#include <mach/hardware.h>
26#include <mach/gpio-ep93xx.h>
27
28#define irq_to_gpio(irq) ((irq) - gpio_to_irq(0))
26 29
27struct ep93xx_gpio { 30struct ep93xx_gpio {
28 void __iomem *mmio_base; 31 void __iomem *mmio_base;
@@ -307,6 +310,21 @@ static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
307 return 0; 310 return 0;
308} 311}
309 312
313/*
314 * Map GPIO A0..A7 (0..7) to irq 64..71,
315 * B0..B7 (8..15) to irq 72..79, and
316 * F0..F7 (16..23) to irq 80..87.
317 */
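/* e.g. gpio 9 (B1) maps to IRQ 64 + 9 == 73 */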
318static int ep93xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
319{
320 int gpio = chip->base + offset;
321
322 if (gpio > EP93XX_GPIO_LINE_MAX_IRQ)
323 return -EINVAL;
324
325 return 64 + gpio;
326}
327
310static int ep93xx_gpio_add_bank(struct bgpio_chip *bgc, struct device *dev, 328static int ep93xx_gpio_add_bank(struct bgpio_chip *bgc, struct device *dev,
311 void __iomem *mmio_base, struct ep93xx_gpio_bank *bank) 329 void __iomem *mmio_base, struct ep93xx_gpio_bank *bank)
312{ 330{
@@ -321,8 +339,10 @@ static int ep93xx_gpio_add_bank(struct bgpio_chip *bgc, struct device *dev,
321 bgc->gc.label = bank->label; 339 bgc->gc.label = bank->label;
322 bgc->gc.base = bank->base; 340 bgc->gc.base = bank->base;
323 341
324 if (bank->has_debounce) 342 if (bank->has_debounce) {
325 bgc->gc.set_debounce = ep93xx_gpio_set_debounce; 343 bgc->gc.set_debounce = ep93xx_gpio_set_debounce;
344 bgc->gc.to_irq = ep93xx_gpio_to_irq;
345 }
326 346
327 return gpiochip_add(&bgc->gc); 347 return gpiochip_add(&bgc->gc);
328} 348}
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 231714def4d2..4e24436b0f82 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -351,7 +351,7 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc,
351 return 0; 351 return 0;
352} 352}
353 353
354int __devexit bgpio_remove(struct bgpio_chip *bgc) 354int bgpio_remove(struct bgpio_chip *bgc)
355{ 355{
356 int err = gpiochip_remove(&bgc->gc); 356 int err = gpiochip_remove(&bgc->gc);
357 357
@@ -361,15 +361,10 @@ int __devexit bgpio_remove(struct bgpio_chip *bgc)
361} 361}
362EXPORT_SYMBOL_GPL(bgpio_remove); 362EXPORT_SYMBOL_GPL(bgpio_remove);
363 363
364int __devinit bgpio_init(struct bgpio_chip *bgc, 364int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
365 struct device *dev, 365 unsigned long sz, void __iomem *dat, void __iomem *set,
366 unsigned long sz, 366 void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
367 void __iomem *dat, 367 bool big_endian)
368 void __iomem *set,
369 void __iomem *clr,
370 void __iomem *dirout,
371 void __iomem *dirin,
372 bool big_endian)
373{ 368{
374 int ret; 369 int ret;
375 370
diff --git a/drivers/gpio/gpio-ks8695.c b/drivers/gpio/gpio-ks8695.c
new file mode 100644
index 000000000000..a3ac66ea364b
--- /dev/null
+++ b/drivers/gpio/gpio-ks8695.c
@@ -0,0 +1,319 @@
1/*
2 * arch/arm/mach-ks8695/gpio.c
3 *
4 * Copyright (C) 2006 Andrew Victor
5 * Updated to GPIOLIB, Copyright 2008 Simtec Electronics
6 * Daniel Silverstone <dsilvers@simtec.co.uk>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21#include <linux/gpio.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/init.h>
25#include <linux/debugfs.h>
26#include <linux/seq_file.h>
27#include <linux/module.h>
28#include <linux/io.h>
29
30#include <mach/hardware.h>
31#include <asm/mach/irq.h>
32
33#include <mach/regs-gpio.h>
34#include <mach/gpio-ks8695.h>
35
36/*
37 * Configure a GPIO line for either GPIO function, or its internal
38 * function (Interrupt, Timer, etc).
39 */
40static void ks8695_gpio_mode(unsigned int pin, short gpio)
41{
42 unsigned int enable[] = { IOPC_IOEINT0EN, IOPC_IOEINT1EN, IOPC_IOEINT2EN, IOPC_IOEINT3EN, IOPC_IOTIM0EN, IOPC_IOTIM1EN };
43 unsigned long x, flags;
44
45 if (pin > KS8695_GPIO_5) /* only GPIO 0..5 have internal functions */
46 return;
47
48 local_irq_save(flags);
49
50 x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPC);
51 if (gpio) /* GPIO: set bit to 0 */
52 x &= ~enable[pin];
53 else /* Internal function: set bit to 1 */
54 x |= enable[pin];
55 __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPC);
56
57 local_irq_restore(flags);
58}
59
60
61static unsigned short gpio_irq[] = { KS8695_IRQ_EXTERN0, KS8695_IRQ_EXTERN1, KS8695_IRQ_EXTERN2, KS8695_IRQ_EXTERN3 };
62
63/*
64 * Configure GPIO pin as external interrupt source.
65 */
66int ks8695_gpio_interrupt(unsigned int pin, unsigned int type)
67{
68 unsigned long x, flags;
69
70 if (pin > KS8695_GPIO_3) /* only GPIO 0..3 can generate IRQ */
71 return -EINVAL;
72
73 local_irq_save(flags);
74
75 /* set pin as input */
76 x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
77 x &= ~IOPM(pin);
78 __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPM);
79
80 local_irq_restore(flags);
81
82 /* Set IRQ triggering type */
83 irq_set_irq_type(gpio_irq[pin], type);
84
85 /* enable interrupt mode */
86 ks8695_gpio_mode(pin, 0);
87
88 return 0;
89}
90EXPORT_SYMBOL(ks8695_gpio_interrupt);
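/*
 * Typical (illustrative) board-code usage:
 *
 *	ks8695_gpio_interrupt(KS8695_GPIO_0, IRQ_TYPE_EDGE_FALLING);
 *	request_irq(KS8695_IRQ_EXTERN0, my_handler, 0, "ext0", NULL);
 *
 * where my_handler is a board-specific interrupt handler.
 */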
91
92
93
94/* .... Generic GPIO interface .............................................. */
95
96/*
97 * Configure the GPIO line as an input.
98 */
99static int ks8695_gpio_direction_input(struct gpio_chip *gc, unsigned int pin)
100{
101 unsigned long x, flags;
102
103 if (pin > KS8695_GPIO_15)
104 return -EINVAL;
105
106 /* set pin to GPIO mode */
107 ks8695_gpio_mode(pin, 1);
108
109 local_irq_save(flags);
110
111 /* set pin as input */
112 x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
113 x &= ~IOPM(pin);
114 __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPM);
115
116 local_irq_restore(flags);
117
118 return 0;
119}
120
121
122/*
123 * Configure the GPIO line as an output, with default state.
124 */
125static int ks8695_gpio_direction_output(struct gpio_chip *gc,
126 unsigned int pin, int state)
127{
128 unsigned long x, flags;
129
130 if (pin > KS8695_GPIO_15)
131 return -EINVAL;
132
133 /* set pin to GPIO mode */
134 ks8695_gpio_mode(pin, 1);
135
136 local_irq_save(flags);
137
138 /* set line state */
139 x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
140 if (state)
141 x |= IOPD(pin);
142 else
143 x &= ~IOPD(pin);
144 __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPD);
145
146 /* set pin as output */
147 x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
148 x |= IOPM(pin);
149 __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPM);
150
151 local_irq_restore(flags);
152
153 return 0;
154}
155
156
157/*
158 * Set the state of an output GPIO line.
159 */
160static void ks8695_gpio_set_value(struct gpio_chip *gc,
161 unsigned int pin, int state)
162{
163 unsigned long x, flags;
164
165 if (pin > KS8695_GPIO_15)
166 return;
167
168 local_irq_save(flags);
169
170 /* set output line state */
171 x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
172 if (state)
173 x |= IOPD(pin);
174 else
175 x &= ~IOPD(pin);
176 __raw_writel(x, KS8695_GPIO_VA + KS8695_IOPD);
177
178 local_irq_restore(flags);
179}
180
181
182/*
183 * Read the state of a GPIO line.
184 */
185static int ks8695_gpio_get_value(struct gpio_chip *gc, unsigned int pin)
186{
187 unsigned long x;
188
189 if (pin > KS8695_GPIO_15)
190 return -EINVAL;
191
192 x = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
193 return (x & IOPD(pin)) != 0;
194}
195
196
197/*
198 * Map GPIO line to IRQ number.
199 */
200static int ks8695_gpio_to_irq(struct gpio_chip *gc, unsigned int pin)
201{
202 if (pin > KS8695_GPIO_3) /* only GPIO 0..3 can generate IRQ */
203 return -EINVAL;
204
205 return gpio_irq[pin];
206}
207
208/*
209 * Map IRQ number to GPIO line.
210 */
211int irq_to_gpio(unsigned int irq)
212{
213 if ((irq < KS8695_IRQ_EXTERN0) || (irq > KS8695_IRQ_EXTERN3))
214 return -EINVAL;
215
216 return (irq - KS8695_IRQ_EXTERN0);
217}
218EXPORT_SYMBOL(irq_to_gpio);
219
220/* GPIOLIB interface */
221
222static struct gpio_chip ks8695_gpio_chip = {
223 .label = "KS8695",
224 .direction_input = ks8695_gpio_direction_input,
225 .direction_output = ks8695_gpio_direction_output,
226 .get = ks8695_gpio_get_value,
227 .set = ks8695_gpio_set_value,
228 .to_irq = ks8695_gpio_to_irq,
229 .base = 0,
230 .ngpio = 16,
231 .can_sleep = 0,
232};
233
234/* Register the GPIOs */
235void ks8695_register_gpios(void)
236{
237 if (gpiochip_add(&ks8695_gpio_chip))
238 printk(KERN_ERR "Unable to register core GPIOs\n");
239}
240
241/* .... Debug interface ..................................................... */
242
243#ifdef CONFIG_DEBUG_FS
244
245static int ks8695_gpio_show(struct seq_file *s, void *unused)
246{
247 unsigned int enable[] = { IOPC_IOEINT0EN, IOPC_IOEINT1EN, IOPC_IOEINT2EN, IOPC_IOEINT3EN, IOPC_IOTIM0EN, IOPC_IOTIM1EN };
248 unsigned int intmask[] = { IOPC_IOEINT0TM, IOPC_IOEINT1TM, IOPC_IOEINT2TM, IOPC_IOEINT3TM };
249 unsigned long mode, ctrl, data;
250 int i;
251
252 mode = __raw_readl(KS8695_GPIO_VA + KS8695_IOPM);
253 ctrl = __raw_readl(KS8695_GPIO_VA + KS8695_IOPC);
254 data = __raw_readl(KS8695_GPIO_VA + KS8695_IOPD);
255
256 seq_printf(s, "Pin\tI/O\tFunction\tState\n\n");
257
258 for (i = KS8695_GPIO_0; i <= KS8695_GPIO_15 ; i++) {
259 seq_printf(s, "%i:\t", i);
260
261 seq_printf(s, "%s\t", (mode & IOPM(i)) ? "Output" : "Input");
262
263 if (i <= KS8695_GPIO_3) {
264 if (ctrl & enable[i]) {
265 seq_printf(s, "EXT%i ", i);
266
267 switch ((ctrl & intmask[i]) >> (4 * i)) {
268 case IOPC_TM_LOW:
269 seq_printf(s, "(Low)"); break;
270 case IOPC_TM_HIGH:
271 seq_printf(s, "(High)"); break;
272 case IOPC_TM_RISING:
273 seq_printf(s, "(Rising)"); break;
274 case IOPC_TM_FALLING:
275 seq_printf(s, "(Falling)"); break;
276 case IOPC_TM_EDGE:
277 seq_printf(s, "(Edges)"); break;
278 }
279 }
280 else
281 seq_printf(s, "GPIO\t");
282 }
283 else if (i <= KS8695_GPIO_5) {
284 if (ctrl & enable[i])
285 seq_printf(s, "TOUT%i\t", i - KS8695_GPIO_4);
286 else
287 seq_printf(s, "GPIO\t");
288 }
289 else
290 seq_printf(s, "GPIO\t");
291
292 seq_printf(s, "\t");
293
294 seq_printf(s, "%i\n", (data & IOPD(i)) ? 1 : 0);
295 }
296 return 0;
297}
298
299static int ks8695_gpio_open(struct inode *inode, struct file *file)
300{
301 return single_open(file, ks8695_gpio_show, NULL);
302}
303
304static const struct file_operations ks8695_gpio_operations = {
305 .open = ks8695_gpio_open,
306 .read = seq_read,
307 .llseek = seq_lseek,
308 .release = single_release,
309};
310
311static int __init ks8695_gpio_debugfs_init(void)
312{
313 /* /sys/kernel/debug/ks8695_gpio */
314 (void) debugfs_create_file("ks8695_gpio", S_IFREG | S_IRUGO, NULL, NULL, &ks8695_gpio_operations);
315 return 0;
316}
317postcore_initcall(ks8695_gpio_debugfs_init);
318
319#endif
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
new file mode 100644
index 000000000000..5b6948081f8f
--- /dev/null
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -0,0 +1,446 @@
1/*
2 * arch/arm/mach-lpc32xx/gpiolib.c
3 *
4 * Author: Kevin Wells <kevin.wells@nxp.com>
5 *
6 * Copyright (C) 2010 NXP Semiconductors
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/io.h>
22#include <linux/errno.h>
23#include <linux/gpio.h>
24
25#include <mach/hardware.h>
26#include <mach/platform.h>
27#include <mach/gpio-lpc32xx.h>
28
29#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
30#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004)
31#define LPC32XX_GPIO_P3_OUTP_CLR _GPREG(0x008)
32#define LPC32XX_GPIO_P3_OUTP_STATE _GPREG(0x00C)
33#define LPC32XX_GPIO_P2_DIR_SET _GPREG(0x010)
34#define LPC32XX_GPIO_P2_DIR_CLR _GPREG(0x014)
35#define LPC32XX_GPIO_P2_DIR_STATE _GPREG(0x018)
36#define LPC32XX_GPIO_P2_INP_STATE _GPREG(0x01C)
37#define LPC32XX_GPIO_P2_OUTP_SET _GPREG(0x020)
38#define LPC32XX_GPIO_P2_OUTP_CLR _GPREG(0x024)
39#define LPC32XX_GPIO_P2_MUX_SET _GPREG(0x028)
40#define LPC32XX_GPIO_P2_MUX_CLR _GPREG(0x02C)
41#define LPC32XX_GPIO_P2_MUX_STATE _GPREG(0x030)
42#define LPC32XX_GPIO_P0_INP_STATE _GPREG(0x040)
43#define LPC32XX_GPIO_P0_OUTP_SET _GPREG(0x044)
44#define LPC32XX_GPIO_P0_OUTP_CLR _GPREG(0x048)
45#define LPC32XX_GPIO_P0_OUTP_STATE _GPREG(0x04C)
46#define LPC32XX_GPIO_P0_DIR_SET _GPREG(0x050)
47#define LPC32XX_GPIO_P0_DIR_CLR _GPREG(0x054)
48#define LPC32XX_GPIO_P0_DIR_STATE _GPREG(0x058)
49#define LPC32XX_GPIO_P1_INP_STATE _GPREG(0x060)
50#define LPC32XX_GPIO_P1_OUTP_SET _GPREG(0x064)
51#define LPC32XX_GPIO_P1_OUTP_CLR _GPREG(0x068)
52#define LPC32XX_GPIO_P1_OUTP_STATE _GPREG(0x06C)
53#define LPC32XX_GPIO_P1_DIR_SET _GPREG(0x070)
54#define LPC32XX_GPIO_P1_DIR_CLR _GPREG(0x074)
55#define LPC32XX_GPIO_P1_DIR_STATE _GPREG(0x078)
56
57#define GPIO012_PIN_TO_BIT(x) (1 << (x))
58#define GPIO3_PIN_TO_BIT(x) (1 << ((x) + 25))
59#define GPO3_PIN_TO_BIT(x) (1 << (x))
60#define GPIO012_PIN_IN_SEL(x, y) (((x) >> (y)) & 1)
61#define GPIO3_PIN_IN_SHIFT(x) ((x) == 5 ? 24 : 10 + (x))
62#define GPIO3_PIN_IN_SEL(x, y) ((x) >> GPIO3_PIN_IN_SHIFT(y))
63#define GPIO3_PIN5_IN_SEL(x) (((x) >> 24) & 1)
64#define GPI3_PIN_IN_SEL(x, y) (((x) >> (y)) & 1)
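/*
 * e.g. GPIO3_PIN_IN_SHIFT(2) == 12 and GPIO3_PIN_IN_SHIFT(5) == 24,
 * reflecting the non-contiguous P3 input bit layout noted in
 * __get_gpio_state_p3() below.
 */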
65
66struct gpio_regs {
67 void __iomem *inp_state;
68 void __iomem *outp_set;
69 void __iomem *outp_clr;
70 void __iomem *dir_set;
71 void __iomem *dir_clr;
72};
73
74/*
75 * GPIO names
76 */
77static const char *gpio_p0_names[LPC32XX_GPIO_P0_MAX] = {
78 "p0.0", "p0.1", "p0.2", "p0.3",
79 "p0.4", "p0.5", "p0.6", "p0.7"
80};
81
82static const char *gpio_p1_names[LPC32XX_GPIO_P1_MAX] = {
83 "p1.0", "p1.1", "p1.2", "p1.3",
84 "p1.4", "p1.5", "p1.6", "p1.7",
85 "p1.8", "p1.9", "p1.10", "p1.11",
86 "p1.12", "p1.13", "p1.14", "p1.15",
87 "p1.16", "p1.17", "p1.18", "p1.19",
88 "p1.20", "p1.21", "p1.22", "p1.23",
89};
90
91static const char *gpio_p2_names[LPC32XX_GPIO_P2_MAX] = {
92 "p2.0", "p2.1", "p2.2", "p2.3",
93 "p2.4", "p2.5", "p2.6", "p2.7",
94 "p2.8", "p2.9", "p2.10", "p2.11",
95 "p2.12"
96};
97
98static const char *gpio_p3_names[LPC32XX_GPIO_P3_MAX] = {
99 "gpi000", "gpio01", "gpio02", "gpio03",
100 "gpio04", "gpio05"
101};
102
103static const char *gpi_p3_names[LPC32XX_GPI_P3_MAX] = {
104 "gpi00", "gpi01", "gpi02", "gpi03",
105 "gpi04", "gpi05", "gpi06", "gpi07",
106 "gpi08", "gpi09", NULL, NULL,
107 NULL, NULL, NULL, "gpi15",
108 "gpi16", "gpi17", "gpi18", "gpi19",
109 "gpi20", "gpi21", "gpi22", "gpi23",
110 "gpi24", "gpi25", "gpi26", "gpi27"
111};
112
113static const char *gpo_p3_names[LPC32XX_GPO_P3_MAX] = {
114 "gpo00", "gpo01", "gpo02", "gpo03",
115 "gpo04", "gpo05", "gpo06", "gpo07",
116 "gpo08", "gpo09", "gpo10", "gpo11",
117 "gpo12", "gpo13", "gpo14", "gpo15",
118 "gpo16", "gpo17", "gpo18", "gpo19",
119 "gpo20", "gpo21", "gpo22", "gpo23"
120};
121
122static struct gpio_regs gpio_grp_regs_p0 = {
123 .inp_state = LPC32XX_GPIO_P0_INP_STATE,
124 .outp_set = LPC32XX_GPIO_P0_OUTP_SET,
125 .outp_clr = LPC32XX_GPIO_P0_OUTP_CLR,
126 .dir_set = LPC32XX_GPIO_P0_DIR_SET,
127 .dir_clr = LPC32XX_GPIO_P0_DIR_CLR,
128};
129
130static struct gpio_regs gpio_grp_regs_p1 = {
131 .inp_state = LPC32XX_GPIO_P1_INP_STATE,
132 .outp_set = LPC32XX_GPIO_P1_OUTP_SET,
133 .outp_clr = LPC32XX_GPIO_P1_OUTP_CLR,
134 .dir_set = LPC32XX_GPIO_P1_DIR_SET,
135 .dir_clr = LPC32XX_GPIO_P1_DIR_CLR,
136};
137
138static struct gpio_regs gpio_grp_regs_p2 = {
139 .inp_state = LPC32XX_GPIO_P2_INP_STATE,
140 .outp_set = LPC32XX_GPIO_P2_OUTP_SET,
141 .outp_clr = LPC32XX_GPIO_P2_OUTP_CLR,
142 .dir_set = LPC32XX_GPIO_P2_DIR_SET,
143 .dir_clr = LPC32XX_GPIO_P2_DIR_CLR,
144};
145
146static struct gpio_regs gpio_grp_regs_p3 = {
147 .inp_state = LPC32XX_GPIO_P3_INP_STATE,
148 .outp_set = LPC32XX_GPIO_P3_OUTP_SET,
149 .outp_clr = LPC32XX_GPIO_P3_OUTP_CLR,
150 .dir_set = LPC32XX_GPIO_P2_DIR_SET,
151 .dir_clr = LPC32XX_GPIO_P2_DIR_CLR,
152};
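/*
 * Note: dir_set/dir_clr above point at the P2 direction registers; the
 * P3 GPIO direction bits are controlled there, at the pin + 25 bit
 * positions produced by GPIO3_PIN_TO_BIT().
 */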
153
154struct lpc32xx_gpio_chip {
155 struct gpio_chip chip;
156 struct gpio_regs *gpio_grp;
157};
158
159static inline struct lpc32xx_gpio_chip *to_lpc32xx_gpio(
160 struct gpio_chip *gpc)
161{
162 return container_of(gpc, struct lpc32xx_gpio_chip, chip);
163}
164
165static void __set_gpio_dir_p012(struct lpc32xx_gpio_chip *group,
166 unsigned pin, int input)
167{
168 if (input)
169 __raw_writel(GPIO012_PIN_TO_BIT(pin),
170 group->gpio_grp->dir_clr);
171 else
172 __raw_writel(GPIO012_PIN_TO_BIT(pin),
173 group->gpio_grp->dir_set);
174}
175
176static void __set_gpio_dir_p3(struct lpc32xx_gpio_chip *group,
177 unsigned pin, int input)
178{
179 u32 u = GPIO3_PIN_TO_BIT(pin);
180
181 if (input)
182 __raw_writel(u, group->gpio_grp->dir_clr);
183 else
184 __raw_writel(u, group->gpio_grp->dir_set);
185}
186
187static void __set_gpio_level_p012(struct lpc32xx_gpio_chip *group,
188 unsigned pin, int high)
189{
190 if (high)
191 __raw_writel(GPIO012_PIN_TO_BIT(pin),
192 group->gpio_grp->outp_set);
193 else
194 __raw_writel(GPIO012_PIN_TO_BIT(pin),
195 group->gpio_grp->outp_clr);
196}
197
198static void __set_gpio_level_p3(struct lpc32xx_gpio_chip *group,
199 unsigned pin, int high)
200{
201 u32 u = GPIO3_PIN_TO_BIT(pin);
202
203 if (high)
204 __raw_writel(u, group->gpio_grp->outp_set);
205 else
206 __raw_writel(u, group->gpio_grp->outp_clr);
207}
208
209static void __set_gpo_level_p3(struct lpc32xx_gpio_chip *group,
210 unsigned pin, int high)
211{
212 if (high)
213 __raw_writel(GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_set);
214 else
215 __raw_writel(GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_clr);
216}
217
218static int __get_gpio_state_p012(struct lpc32xx_gpio_chip *group,
219 unsigned pin)
220{
221 return GPIO012_PIN_IN_SEL(__raw_readl(group->gpio_grp->inp_state),
222 pin);
223}
224
225static int __get_gpio_state_p3(struct lpc32xx_gpio_chip *group,
226 unsigned pin)
227{
228 int state = __raw_readl(group->gpio_grp->inp_state);
229
230 /*
231 * P3 GPIO pin input mapping is not contiguous, GPIOP3-0..4 is mapped
232 * to bits 10..14, while GPIOP3-5 is mapped to bit 24.
233 */
234 return GPIO3_PIN_IN_SEL(state, pin);
235}
236
237static int __get_gpi_state_p3(struct lpc32xx_gpio_chip *group,
238 unsigned pin)
239{
240 return GPI3_PIN_IN_SEL(__raw_readl(group->gpio_grp->inp_state), pin);
241}
242
243/*
244 * GENERIC_GPIO primitives.
245 */
246static int lpc32xx_gpio_dir_input_p012(struct gpio_chip *chip,
247 unsigned pin)
248{
249 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
250
251 __set_gpio_dir_p012(group, pin, 1);
252
253 return 0;
254}
255
256static int lpc32xx_gpio_dir_input_p3(struct gpio_chip *chip,
257 unsigned pin)
258{
259 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
260
261 __set_gpio_dir_p3(group, pin, 1);
262
263 return 0;
264}
265
266static int lpc32xx_gpio_dir_in_always(struct gpio_chip *chip,
267 unsigned pin)
268{
269 return 0;
270}
271
272static int lpc32xx_gpio_get_value_p012(struct gpio_chip *chip, unsigned pin)
273{
274 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
275
276 return __get_gpio_state_p012(group, pin);
277}
278
279static int lpc32xx_gpio_get_value_p3(struct gpio_chip *chip, unsigned pin)
280{
281 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
282
283 return __get_gpio_state_p3(group, pin);
284}
285
286static int lpc32xx_gpi_get_value(struct gpio_chip *chip, unsigned pin)
287{
288 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
289
290 return __get_gpi_state_p3(group, pin);
291}
292
293static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin,
294 int value)
295{
296 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
297
298 __set_gpio_dir_p012(group, pin, 0);
299
300 return 0;
301}
302
303static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
304 int value)
305{
306 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
307
308 __set_gpio_dir_p3(group, pin, 0);
309
310 return 0;
311}
312
313static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
314 int value)
315{
316 return 0;
317}
318
319static void lpc32xx_gpio_set_value_p012(struct gpio_chip *chip, unsigned pin,
320 int value)
321{
322 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
323
324 __set_gpio_level_p012(group, pin, value);
325}
326
327static void lpc32xx_gpio_set_value_p3(struct gpio_chip *chip, unsigned pin,
328 int value)
329{
330 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
331
332 __set_gpio_level_p3(group, pin, value);
333}
334
335static void lpc32xx_gpo_set_value(struct gpio_chip *chip, unsigned pin,
336 int value)
337{
338 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
339
340 __set_gpo_level_p3(group, pin, value);
341}
342
343static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
344{
345 if (pin < chip->ngpio)
346 return 0;
347
348 return -EINVAL;
349}
350
351static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
352 {
353 .chip = {
354 .label = "gpio_p0",
355 .direction_input = lpc32xx_gpio_dir_input_p012,
356 .get = lpc32xx_gpio_get_value_p012,
357 .direction_output = lpc32xx_gpio_dir_output_p012,
358 .set = lpc32xx_gpio_set_value_p012,
359 .request = lpc32xx_gpio_request,
360 .base = LPC32XX_GPIO_P0_GRP,
361 .ngpio = LPC32XX_GPIO_P0_MAX,
362 .names = gpio_p0_names,
363 .can_sleep = 0,
364 },
365 .gpio_grp = &gpio_grp_regs_p0,
366 },
367 {
368 .chip = {
369 .label = "gpio_p1",
370 .direction_input = lpc32xx_gpio_dir_input_p012,
371 .get = lpc32xx_gpio_get_value_p012,
372 .direction_output = lpc32xx_gpio_dir_output_p012,
373 .set = lpc32xx_gpio_set_value_p012,
374 .request = lpc32xx_gpio_request,
375 .base = LPC32XX_GPIO_P1_GRP,
376 .ngpio = LPC32XX_GPIO_P1_MAX,
377 .names = gpio_p1_names,
378 .can_sleep = 0,
379 },
380 .gpio_grp = &gpio_grp_regs_p1,
381 },
382 {
383 .chip = {
384 .label = "gpio_p2",
385 .direction_input = lpc32xx_gpio_dir_input_p012,
386 .get = lpc32xx_gpio_get_value_p012,
387 .direction_output = lpc32xx_gpio_dir_output_p012,
388 .set = lpc32xx_gpio_set_value_p012,
389 .request = lpc32xx_gpio_request,
390 .base = LPC32XX_GPIO_P2_GRP,
391 .ngpio = LPC32XX_GPIO_P2_MAX,
392 .names = gpio_p2_names,
393 .can_sleep = 0,
394 },
395 .gpio_grp = &gpio_grp_regs_p2,
396 },
397 {
398 .chip = {
399 .label = "gpio_p3",
400 .direction_input = lpc32xx_gpio_dir_input_p3,
401 .get = lpc32xx_gpio_get_value_p3,
402 .direction_output = lpc32xx_gpio_dir_output_p3,
403 .set = lpc32xx_gpio_set_value_p3,
404 .request = lpc32xx_gpio_request,
405 .base = LPC32XX_GPIO_P3_GRP,
406 .ngpio = LPC32XX_GPIO_P3_MAX,
407 .names = gpio_p3_names,
408 .can_sleep = 0,
409 },
410 .gpio_grp = &gpio_grp_regs_p3,
411 },
412 {
413 .chip = {
414 .label = "gpi_p3",
415 .direction_input = lpc32xx_gpio_dir_in_always,
416 .get = lpc32xx_gpi_get_value,
417 .request = lpc32xx_gpio_request,
418 .base = LPC32XX_GPI_P3_GRP,
419 .ngpio = LPC32XX_GPI_P3_MAX,
420 .names = gpi_p3_names,
421 .can_sleep = 0,
422 },
423 .gpio_grp = &gpio_grp_regs_p3,
424 },
425 {
426 .chip = {
427 .label = "gpo_p3",
428 .direction_output = lpc32xx_gpio_dir_out_always,
429 .set = lpc32xx_gpo_set_value,
430 .request = lpc32xx_gpio_request,
431 .base = LPC32XX_GPO_P3_GRP,
432 .ngpio = LPC32XX_GPO_P3_MAX,
433 .names = gpo_p3_names,
434 .can_sleep = 0,
435 },
436 .gpio_grp = &gpio_grp_regs_p3,
437 },
438};
439
440void __init lpc32xx_gpio_init(void)
441{
442 int i;
443
444 for (i = 0; i < ARRAY_SIZE(lpc32xx_gpiochip); i++)
445 gpiochip_add(&lpc32xx_gpiochip[i].chip);
446}
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 2c212c732d76..740caed2b278 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -27,8 +27,9 @@
27#include <asm/mach/irq.h> 27#include <asm/mach/irq.h>
28 28
29#include <plat/pincfg.h> 29#include <plat/pincfg.h>
30#include <plat/gpio-nomadik.h>
30#include <mach/hardware.h> 31#include <mach/hardware.h>
31#include <mach/gpio.h> 32#include <asm/gpio.h>
32 33
33/* 34/*
34 * The GPIO module in the Nomadik family of Systems-on-Chip is an 35 * The GPIO module in the Nomadik family of Systems-on-Chip is an
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0599854e2217..9c27244fd680 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -25,7 +25,7 @@
25#include <mach/hardware.h> 25#include <mach/hardware.h>
26#include <asm/irq.h> 26#include <asm/irq.h>
27#include <mach/irqs.h> 27#include <mach/irqs.h>
28#include <mach/gpio.h> 28#include <asm/gpio.h>
29#include <asm/mach/irq.h> 29#include <asm/mach/irq.h>
30 30
31struct gpio_bank { 31struct gpio_bank {
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
new file mode 100644
index 000000000000..9052925c6fa2
--- /dev/null
+++ b/drivers/gpio/gpio-pxa.c
@@ -0,0 +1,338 @@
1/*
2 * linux/arch/arm/plat-pxa/gpio.c
3 *
4 * Generic PXA GPIO handling
5 *
6 * Author: Nicolas Pitre
7 * Created: Jun 15, 2001
8 * Copyright: MontaVista Software Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14#include <linux/gpio.h>
15#include <linux/init.h>
16#include <linux/irq.h>
17#include <linux/io.h>
18#include <linux/syscore_ops.h>
19#include <linux/slab.h>
20
21#include <mach/gpio-pxa.h>
22
23int pxa_last_gpio;
24
25struct pxa_gpio_chip {
26 struct gpio_chip chip;
27 void __iomem *regbase;
28 char label[10];
29
30 unsigned long irq_mask;
31 unsigned long irq_edge_rise;
32 unsigned long irq_edge_fall;
33
34#ifdef CONFIG_PM
35 unsigned long saved_gplr;
36 unsigned long saved_gpdr;
37 unsigned long saved_grer;
38 unsigned long saved_gfer;
39#endif
40};
41
42static DEFINE_SPINLOCK(gpio_lock);
43static struct pxa_gpio_chip *pxa_gpio_chips;
44
45#define for_each_gpio_chip(i, c) \
46 for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)
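/* e.g. pxa_last_gpio == 84 walks three banks, at GPIOs 0, 32 and 64 */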
47
48static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
49{
50 return container_of(c, struct pxa_gpio_chip, chip)->regbase;
51}
52
53static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
54{
55 return &pxa_gpio_chips[gpio_to_bank(gpio)];
56}
57
58static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
59{
60 void __iomem *base = gpio_chip_base(chip);
61 uint32_t value, mask = 1 << offset;
62 unsigned long flags;
63
64 spin_lock_irqsave(&gpio_lock, flags);
65
66 value = __raw_readl(base + GPDR_OFFSET);
67 if (__gpio_is_inverted(chip->base + offset))
68 value |= mask;
69 else
70 value &= ~mask;
71 __raw_writel(value, base + GPDR_OFFSET);
72
73 spin_unlock_irqrestore(&gpio_lock, flags);
74 return 0;
75}
76
77static int pxa_gpio_direction_output(struct gpio_chip *chip,
78 unsigned offset, int value)
79{
80 void __iomem *base = gpio_chip_base(chip);
81 uint32_t tmp, mask = 1 << offset;
82 unsigned long flags;
83
84 __raw_writel(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
85
86 spin_lock_irqsave(&gpio_lock, flags);
87
88 tmp = __raw_readl(base + GPDR_OFFSET);
89 if (__gpio_is_inverted(chip->base + offset))
90 tmp &= ~mask;
91 else
92 tmp |= mask;
93 __raw_writel(tmp, base + GPDR_OFFSET);
94
95 spin_unlock_irqrestore(&gpio_lock, flags);
96 return 0;
97}
98
99static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
100{
101 return __raw_readl(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
102}
103
104static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
105{
106 __raw_writel(1 << offset, gpio_chip_base(chip) +
107 (value ? GPSR_OFFSET : GPCR_OFFSET));
108}
109
110static int __init pxa_init_gpio_chip(int gpio_end)
111{
112 int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
113 struct pxa_gpio_chip *chips;
114
115 chips = kzalloc(nbanks * sizeof(struct pxa_gpio_chip), GFP_KERNEL);
116 if (chips == NULL) {
117 pr_err("%s: failed to allocate GPIO chips\n", __func__);
118 return -ENOMEM;
119 }
120
121 for (i = 0, gpio = 0; i < nbanks; i++, gpio += 32) {
122 struct gpio_chip *c = &chips[i].chip;
123
124 sprintf(chips[i].label, "gpio-%d", i);
125 chips[i].regbase = (void __iomem *)GPIO_BANK(i);
126
127 c->base = gpio;
128 c->label = chips[i].label;
129
130 c->direction_input = pxa_gpio_direction_input;
131 c->direction_output = pxa_gpio_direction_output;
132 c->get = pxa_gpio_get;
133 c->set = pxa_gpio_set;
134
135 /* number of GPIOs on last bank may be less than 32 */
136 c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
137 gpiochip_add(c);
138 }
139 pxa_gpio_chips = chips;
140 return 0;
141}
142
143/* Update only those GRERx and GFERx edge detection register bits whose
 144 * corresponding bits are set in c->irq_mask
145 */
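/*
 * e.g. with c->irq_mask == 0xf and c->irq_edge_rise == 0x5, GRER bits
 * 0 and 2 end up set, bits 1 and 3 cleared, and bits 4..31 untouched.
 */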
146static inline void update_edge_detect(struct pxa_gpio_chip *c)
147{
148 uint32_t grer, gfer;
149
150 grer = __raw_readl(c->regbase + GRER_OFFSET) & ~c->irq_mask;
151 gfer = __raw_readl(c->regbase + GFER_OFFSET) & ~c->irq_mask;
152 grer |= c->irq_edge_rise & c->irq_mask;
153 gfer |= c->irq_edge_fall & c->irq_mask;
154 __raw_writel(grer, c->regbase + GRER_OFFSET);
155 __raw_writel(gfer, c->regbase + GFER_OFFSET);
156}
157
158static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
159{
160 struct pxa_gpio_chip *c;
161 int gpio = irq_to_gpio(d->irq);
162 unsigned long gpdr, mask = GPIO_bit(gpio);
163
164 c = gpio_to_pxachip(gpio);
165
166 if (type == IRQ_TYPE_PROBE) {
167 /* Don't mess with enabled GPIOs using preconfigured edges or
168 * GPIOs set to alternate function or to output during probe
169 */
170 if ((c->irq_edge_rise | c->irq_edge_fall) & GPIO_bit(gpio))
171 return 0;
172
173 if (__gpio_is_occupied(gpio))
174 return 0;
175
176 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
177 }
178
179 gpdr = __raw_readl(c->regbase + GPDR_OFFSET);
180
181 if (__gpio_is_inverted(gpio))
182 __raw_writel(gpdr | mask, c->regbase + GPDR_OFFSET);
183 else
184 __raw_writel(gpdr & ~mask, c->regbase + GPDR_OFFSET);
185
186 if (type & IRQ_TYPE_EDGE_RISING)
187 c->irq_edge_rise |= mask;
188 else
189 c->irq_edge_rise &= ~mask;
190
191 if (type & IRQ_TYPE_EDGE_FALLING)
192 c->irq_edge_fall |= mask;
193 else
194 c->irq_edge_fall &= ~mask;
195
196 update_edge_detect(c);
197
198 pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, d->irq, gpio,
199 ((type & IRQ_TYPE_EDGE_RISING) ? " rising" : ""),
200 ((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : ""));
201 return 0;
202}
203
204static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
205{
206 struct pxa_gpio_chip *c;
207 int loop, gpio, gpio_base, n;
208 unsigned long gedr;
209
210 do {
211 loop = 0;
212 for_each_gpio_chip(gpio, c) {
213 gpio_base = c->chip.base;
214
215 gedr = __raw_readl(c->regbase + GEDR_OFFSET);
216 gedr = gedr & c->irq_mask;
217 __raw_writel(gedr, c->regbase + GEDR_OFFSET);
218
219 n = find_first_bit(&gedr, BITS_PER_LONG);
220 while (n < BITS_PER_LONG) {
221 loop = 1;
222
223 generic_handle_irq(gpio_to_irq(gpio_base + n));
224 n = find_next_bit(&gedr, BITS_PER_LONG, n + 1);
225 }
226 }
227 } while (loop);
228}
229
230static void pxa_ack_muxed_gpio(struct irq_data *d)
231{
232 int gpio = irq_to_gpio(d->irq);
233 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
234
235 __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
236}
237
238static void pxa_mask_muxed_gpio(struct irq_data *d)
239{
240 int gpio = irq_to_gpio(d->irq);
241 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
242 uint32_t grer, gfer;
243
244 c->irq_mask &= ~GPIO_bit(gpio);
245
246 grer = __raw_readl(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
247 gfer = __raw_readl(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
248 __raw_writel(grer, c->regbase + GRER_OFFSET);
249 __raw_writel(gfer, c->regbase + GFER_OFFSET);
250}
251
252static void pxa_unmask_muxed_gpio(struct irq_data *d)
253{
254 int gpio = irq_to_gpio(d->irq);
255 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
256
257 c->irq_mask |= GPIO_bit(gpio);
258 update_edge_detect(c);
259}
260
261static struct irq_chip pxa_muxed_gpio_chip = {
262 .name = "GPIO",
263 .irq_ack = pxa_ack_muxed_gpio,
264 .irq_mask = pxa_mask_muxed_gpio,
265 .irq_unmask = pxa_unmask_muxed_gpio,
266 .irq_set_type = pxa_gpio_irq_type,
267};
268
269void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn)
270{
271 struct pxa_gpio_chip *c;
272 int gpio, irq;
273
274 pxa_last_gpio = end;
275
276 /* Initialize GPIO chips */
277 pxa_init_gpio_chip(end);
278
279 /* clear all GPIO edge detects */
280 for_each_gpio_chip(gpio, c) {
281 __raw_writel(0, c->regbase + GFER_OFFSET);
282 __raw_writel(0, c->regbase + GRER_OFFSET);
283 __raw_writel(~0, c->regbase + GEDR_OFFSET);
284 }
285
286 for (irq = gpio_to_irq(start); irq <= gpio_to_irq(end); irq++) {
287 irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
288 handle_edge_irq);
289 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
290 }
291
292 /* Install handler for GPIO>=2 edge detect interrupts */
293 irq_set_chained_handler(mux_irq, pxa_gpio_demux_handler);
294 pxa_muxed_gpio_chip.irq_set_wake = fn;
295}
296
297#ifdef CONFIG_PM
298static int pxa_gpio_suspend(void)
299{
300 struct pxa_gpio_chip *c;
301 int gpio;
302
303 for_each_gpio_chip(gpio, c) {
304 c->saved_gplr = __raw_readl(c->regbase + GPLR_OFFSET);
305 c->saved_gpdr = __raw_readl(c->regbase + GPDR_OFFSET);
306 c->saved_grer = __raw_readl(c->regbase + GRER_OFFSET);
307 c->saved_gfer = __raw_readl(c->regbase + GFER_OFFSET);
308
309 /* Clear GPIO transition detect bits */
310 __raw_writel(0xffffffff, c->regbase + GEDR_OFFSET);
311 }
312 return 0;
313}
314
315static void pxa_gpio_resume(void)
316{
317 struct pxa_gpio_chip *c;
318 int gpio;
319
320 for_each_gpio_chip(gpio, c) {
321 /* restore level with set/clear */
322 __raw_writel( c->saved_gplr, c->regbase + GPSR_OFFSET);
323 __raw_writel(~c->saved_gplr, c->regbase + GPCR_OFFSET);
324
325 __raw_writel(c->saved_grer, c->regbase + GRER_OFFSET);
326 __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET);
327 __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET);
328 }
329}
330#else
331#define pxa_gpio_suspend NULL
332#define pxa_gpio_resume NULL
333#endif
334
335struct syscore_ops pxa_gpio_syscore_ops = {
336 .suspend = pxa_gpio_suspend,
337 .resume = pxa_gpio_resume,
338};
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
new file mode 100644
index 000000000000..b6c1f6d80649
--- /dev/null
+++ b/drivers/gpio/gpio-sa1100.c
@@ -0,0 +1,63 @@
1/*
2 * linux/arch/arm/mach-sa1100/gpio.c
3 *
4 * Generic SA-1100 GPIO handling
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/gpio.h>
11#include <linux/init.h>
12#include <linux/module.h>
13
14#include <mach/hardware.h>
15
16static int sa1100_gpio_get(struct gpio_chip *chip, unsigned offset)
17{
18 return GPLR & GPIO_GPIO(offset);
19}
20
21static void sa1100_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
22{
23 if (value)
24 GPSR = GPIO_GPIO(offset);
25 else
26 GPCR = GPIO_GPIO(offset);
27}
28
29static int sa1100_direction_input(struct gpio_chip *chip, unsigned offset)
30{
31 unsigned long flags;
32
33 local_irq_save(flags);
34 GPDR &= ~GPIO_GPIO(offset);
35 local_irq_restore(flags);
36 return 0;
37}
38
39static int sa1100_direction_output(struct gpio_chip *chip, unsigned offset, int value)
40{
41 unsigned long flags;
42
43 local_irq_save(flags);
44 sa1100_gpio_set(chip, offset, value);
45 GPDR |= GPIO_GPIO(offset);
46 local_irq_restore(flags);
47 return 0;
48}
49
50static struct gpio_chip sa1100_gpio_chip = {
51 .label = "gpio",
52 .direction_input = sa1100_direction_input,
53 .direction_output = sa1100_direction_output,
54 .set = sa1100_gpio_set,
55 .get = sa1100_gpio_get,
56 .base = 0,
57 .ngpio = GPIO_MAX + 1,
58};
59
60void __init sa1100_init_gpio(void)
61{
62 gpiochip_add(&sa1100_gpio_chip);
63}
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 747eb40e8afe..6b65207c8106 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -27,6 +27,7 @@
27 27
28#include <asm/mach/irq.h> 28#include <asm/mach/irq.h>
29 29
30#include <mach/gpio-tegra.h>
30#include <mach/iomap.h> 31#include <mach/iomap.h>
31#include <mach/suspend.h> 32#include <mach/suspend.h>
32 33
@@ -134,7 +135,10 @@ static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
134 return 0; 135 return 0;
135} 136}
136 137
137 138static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
139{
140 return TEGRA_GPIO_TO_IRQ(offset);
141}
138 142
139static struct gpio_chip tegra_gpio_chip = { 143static struct gpio_chip tegra_gpio_chip = {
140 .label = "tegra-gpio", 144 .label = "tegra-gpio",
@@ -142,6 +146,7 @@ static struct gpio_chip tegra_gpio_chip = {
142 .get = tegra_gpio_get, 146 .get = tegra_gpio_get,
143 .direction_output = tegra_gpio_direction_output, 147 .direction_output = tegra_gpio_direction_output,
144 .set = tegra_gpio_set, 148 .set = tegra_gpio_set,
149 .to_irq = tegra_gpio_to_irq,
145 .base = 0, 150 .base = 0,
146 .ngpio = TEGRA_NR_GPIOS, 151 .ngpio = TEGRA_NR_GPIOS,
147}; 152};
@@ -331,6 +336,7 @@ static struct lock_class_key gpio_lock_class;
331static int __init tegra_gpio_init(void) 336static int __init tegra_gpio_init(void)
332{ 337{
333 struct tegra_gpio_bank *bank; 338 struct tegra_gpio_bank *bank;
339 int gpio;
334 int i; 340 int i;
335 int j; 341 int j;
336 342
@@ -352,14 +358,17 @@ static int __init tegra_gpio_init(void)
352 358
353 gpiochip_add(&tegra_gpio_chip); 359 gpiochip_add(&tegra_gpio_chip);
354 360
355 for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) { 361 for (gpio = 0; gpio < TEGRA_NR_GPIOS; gpio++) {
356 bank = &tegra_gpio_banks[GPIO_BANK(irq_to_gpio(i))]; 362 int irq = TEGRA_GPIO_TO_IRQ(gpio);
363 /* No validity check; all Tegra GPIOs are valid IRQs */
364
365 bank = &tegra_gpio_banks[GPIO_BANK(gpio)];
357 366
358 irq_set_lockdep_class(i, &gpio_lock_class); 367 irq_set_lockdep_class(irq, &gpio_lock_class);
359 irq_set_chip_data(i, bank); 368 irq_set_chip_data(irq, bank);
360 irq_set_chip_and_handler(i, &tegra_gpio_irq_chip, 369 irq_set_chip_and_handler(irq, &tegra_gpio_irq_chip,
361 handle_simple_irq); 370 handle_simple_irq);
362 set_irq_flags(i, IRQF_VALID); 371 set_irq_flags(irq, IRQF_VALID);
363 } 372 }
364 373
365 for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) { 374 for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) {
diff --git a/drivers/gpio/gpio-tnetv107x.c b/drivers/gpio/gpio-tnetv107x.c
new file mode 100644
index 000000000000..3fa3e2867e19
--- /dev/null
+++ b/drivers/gpio/gpio-tnetv107x.c
@@ -0,0 +1,205 @@
1/*
2 * Texas Instruments TNETV107X GPIO Controller
3 *
4 * Copyright (C) 2010 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/gpio.h>
18
19#include <mach/common.h>
20#include <mach/tnetv107x.h>
21
22struct tnetv107x_gpio_regs {
23 u32 idver;
24 u32 data_in[3];
25 u32 data_out[3];
26 u32 direction[3];
27 u32 enable[3];
28};
29
30#define gpio_reg_index(gpio) ((gpio) >> 5)
31#define gpio_reg_bit(gpio) BIT((gpio) & 0x1f)
32
33#define gpio_reg_rmw(reg, mask, val) \
34 __raw_writel((__raw_readl(reg) & ~(mask)) | (val), (reg))
35
36#define gpio_reg_set_bit(reg, gpio) \
37 gpio_reg_rmw((reg) + gpio_reg_index(gpio), 0, gpio_reg_bit(gpio))
38
39#define gpio_reg_clear_bit(reg, gpio) \
40 gpio_reg_rmw((reg) + gpio_reg_index(gpio), gpio_reg_bit(gpio), 0)
41
42#define gpio_reg_get_bit(reg, gpio) \
43 (__raw_readl((reg) + gpio_reg_index(gpio)) & gpio_reg_bit(gpio))
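/*
 * e.g. gpio 37: gpio_reg_index() == 1 and gpio_reg_bit() == BIT(5),
 * so the helpers above address bit 5 of the second register word.
 */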
44
45#define chip2controller(chip) \
46 container_of(chip, struct davinci_gpio_controller, chip)
47
48#define TNETV107X_GPIO_CTLRS DIV_ROUND_UP(TNETV107X_N_GPIO, 32)
49
50static struct davinci_gpio_controller chips[TNETV107X_GPIO_CTLRS];
51
52static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset)
53{
54 struct davinci_gpio_controller *ctlr = chip2controller(chip);
55 struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
56 unsigned gpio = chip->base + offset;
57 unsigned long flags;
58
59 spin_lock_irqsave(&ctlr->lock, flags);
60
61 gpio_reg_set_bit(regs->enable, gpio);
62
63 spin_unlock_irqrestore(&ctlr->lock, flags);
64
65 return 0;
66}
67
68static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset)
69{
70 struct davinci_gpio_controller *ctlr = chip2controller(chip);
71 struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
72 unsigned gpio = chip->base + offset;
73 unsigned long flags;
74
75 spin_lock_irqsave(&ctlr->lock, flags);
76
77 gpio_reg_clear_bit(regs->enable, gpio);
78
79 spin_unlock_irqrestore(&ctlr->lock, flags);
80}
81
82static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
83{
84 struct davinci_gpio_controller *ctlr = chip2controller(chip);
85 struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
86 unsigned gpio = chip->base + offset;
87 unsigned long flags;
88
89 spin_lock_irqsave(&ctlr->lock, flags);
90
91 gpio_reg_set_bit(regs->direction, gpio);
92
93 spin_unlock_irqrestore(&ctlr->lock, flags);
94
95 return 0;
96}
97
98static int tnetv107x_gpio_dir_out(struct gpio_chip *chip,
99 unsigned offset, int value)
100{
101 struct davinci_gpio_controller *ctlr = chip2controller(chip);
102 struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
103 unsigned gpio = chip->base + offset;
104 unsigned long flags;
105
106 spin_lock_irqsave(&ctlr->lock, flags);
107
108 if (value)
109 gpio_reg_set_bit(regs->data_out, gpio);
110 else
111 gpio_reg_clear_bit(regs->data_out, gpio);
112
113 gpio_reg_clear_bit(regs->direction, gpio);
114
115 spin_unlock_irqrestore(&ctlr->lock, flags);
116
117 return 0;
118}
119
120static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset)
121{
122 struct davinci_gpio_controller *ctlr = chip2controller(chip);
123 struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
124 unsigned gpio = chip->base + offset;
125 int ret;
126
127 ret = gpio_reg_get_bit(regs->data_in, gpio);
128
129 return ret ? 1 : 0;
130}
131
132static void tnetv107x_gpio_set(struct gpio_chip *chip,
133 unsigned offset, int value)
134{
135 struct davinci_gpio_controller *ctlr = chip2controller(chip);
136 struct tnetv107x_gpio_regs __iomem *regs = ctlr->regs;
137 unsigned gpio = chip->base + offset;
138 unsigned long flags;
139
140 spin_lock_irqsave(&ctlr->lock, flags);
141
142 if (value)
143 gpio_reg_set_bit(regs->data_out, gpio);
144 else
145 gpio_reg_clear_bit(regs->data_out, gpio);
146
147 spin_unlock_irqrestore(&ctlr->lock, flags);
148}
149
150static int __init tnetv107x_gpio_setup(void)
151{
152 int i, base;
153 unsigned ngpio;
154 struct davinci_soc_info *soc_info = &davinci_soc_info;
155 struct tnetv107x_gpio_regs *regs;
156 struct davinci_gpio_controller *ctlr;
157
158 if (soc_info->gpio_type != GPIO_TYPE_TNETV107X)
159 return 0;
160
161 ngpio = soc_info->gpio_num;
162 if (ngpio == 0) {
163 pr_err("GPIO setup: how many GPIOs?\n");
164 return -EINVAL;
165 }
166
167 if (WARN_ON(TNETV107X_N_GPIO < ngpio))
168 ngpio = TNETV107X_N_GPIO;
169
170 regs = ioremap(soc_info->gpio_base, SZ_4K);
171 if (WARN_ON(!regs))
172 return -EINVAL;
173
174 for (i = 0, base = 0; base < ngpio; i++, base += 32) {
175 ctlr = &chips[i];
176
177 ctlr->chip.label = "tnetv107x";
178 ctlr->chip.can_sleep = 0;
179 ctlr->chip.base = base;
180 ctlr->chip.ngpio = ngpio - base;
181 if (ctlr->chip.ngpio > 32)
182 ctlr->chip.ngpio = 32;
183
184 ctlr->chip.request = tnetv107x_gpio_request;
185 ctlr->chip.free = tnetv107x_gpio_free;
186 ctlr->chip.direction_input = tnetv107x_gpio_dir_in;
187 ctlr->chip.get = tnetv107x_gpio_get;
188 ctlr->chip.direction_output = tnetv107x_gpio_dir_out;
189 ctlr->chip.set = tnetv107x_gpio_set;
190
191 spin_lock_init(&ctlr->lock);
192
193 ctlr->regs = regs;
194 ctlr->set_data = &regs->data_out[i];
195 ctlr->clr_data = &regs->data_out[i];
196 ctlr->in_data = &regs->data_in[i];
197
198 gpiochip_add(&ctlr->chip);
199 }
200
201 soc_info->gpio_ctlrs = chips;
202 soc_info->gpio_ctlrs_num = DIV_ROUND_UP(ngpio, 32);
203 return 0;
204}
205pure_initcall(tnetv107x_gpio_setup);
diff --git a/drivers/gpio/gpio-u300.c b/drivers/gpio/gpio-u300.c
index 53e8255cb0ba..4035778852b0 100644
--- a/drivers/gpio/gpio-u300.c
+++ b/drivers/gpio/gpio-u300.c
@@ -1,18 +1,17 @@
1/* 1/*
2 * U300 GPIO module. 2 * U300 GPIO module.
3 * 3 *
4 * Copyright (C) 2007-2009 ST-Ericsson AB 4 * Copyright (C) 2007-2011 ST-Ericsson AB
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 * This driver can handle either of the two basic GPIO cores 6 * This driver can handle either of the two basic GPIO cores
7 * available in the U300 platforms: 7 * available in the U300 platforms:
8 * COH 901 335 - Used in DB3150 (U300 1.0) and DB3200 (U330 1.0) 8 * COH 901 335 - Used in DB3150 (U300 1.0) and DB3200 (U330 1.0)
9 * COH 901 571/3 - Used in DB3210 (U365 2.0) and DB3350 (U335 1.0) 9 * COH 901 571/3 - Used in DB3210 (U365 2.0) and DB3350 (U335 1.0)
10 * Notice that you also have inline macros in <asm-arch/gpio.h> 10 * Author: Linus Walleij <linus.walleij@linaro.org>
11 * Author: Linus Walleij <linus.walleij@stericsson.com>
12 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> 11 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
13 *
14 */ 12 */
15#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/irq.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
17#include <linux/delay.h> 16#include <linux/delay.h>
18#include <linux/errno.h> 17#include <linux/errno.h>
@@ -21,677 +20,898 @@
21#include <linux/err.h> 20#include <linux/err.h>
22#include <linux/platform_device.h> 21#include <linux/platform_device.h>
23#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/list.h>
24#include <linux/slab.h>
25#include <mach/gpio-u300.h>
24 26
25/* Reference to GPIO block clock */ 27/*
26static struct clk *clk; 28 * Bias modes for U300 GPIOs
29 *
30 * GPIO_U300_CONFIG_BIAS_UNKNOWN: this bias mode is not known to us
31 * GPIO_U300_CONFIG_BIAS_FLOAT: no specific bias, the GPIO will float or state
32 * is not controlled by software
33 * GPIO_U300_CONFIG_BIAS_PULL_UP: the GPIO will be pulled up (usually with high
34 * impedance to VDD)
35 */
36#define GPIO_U300_CONFIG_BIAS_UNKNOWN 0x1000
37#define GPIO_U300_CONFIG_BIAS_FLOAT 0x1001
38#define GPIO_U300_CONFIG_BIAS_PULL_UP 0x1002
27 39
28/* Memory resource */ 40/*
29static struct resource *memres; 41 * Drive modes for U300 GPIOs (output)
30static void __iomem *virtbase; 42 *
31static struct device *gpiodev; 43 * GPIO_U300_CONFIG_DRIVE_PUSH_PULL: the GPIO will be driven actively high and
44 * low, this is the most typical case and is typically achieved with two
45 * active transistors on the output
46 * GPIO_U300_CONFIG_DRIVE_OPEN_DRAIN: the GPIO will be driven with open drain
47 * (open collector) which means it is usually wired with other output
48 * ports which are then pulled up with an external resistor
49 * GPIO_U300_CONFIG_DRIVE_OPEN_SOURCE: the GPIO will be driven with open source
50 * (open emitter) which is the same as open drain mutatis mutandis but
51 * pulled to ground
52 */
53#define GPIO_U300_CONFIG_DRIVE_PUSH_PULL 0x2000
54#define GPIO_U300_CONFIG_DRIVE_OPEN_DRAIN 0x2001
55#define GPIO_U300_CONFIG_DRIVE_OPEN_SOURCE 0x2002
56
57/*
58 * Register definitions for COH 901 335 variant
59 */
60#define U300_335_PORT_STRIDE (0x1C)
61/* Port X Pin Data Register 32bit, this is both input and output (R/W) */
62#define U300_335_PXPDIR (0x00)
63#define U300_335_PXPDOR (0x00)
64/* Port X Pin Config Register 32bit (R/W) */
65#define U300_335_PXPCR (0x04)
66/* This register layout is the same in both blocks */
67#define U300_GPIO_PXPCR_ALL_PINS_MODE_MASK (0x0000FFFFUL)
68#define U300_GPIO_PXPCR_PIN_MODE_MASK (0x00000003UL)
69#define U300_GPIO_PXPCR_PIN_MODE_SHIFT (0x00000002UL)
70#define U300_GPIO_PXPCR_PIN_MODE_INPUT (0x00000000UL)
71#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL (0x00000001UL)
72#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN (0x00000002UL)
73#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE (0x00000003UL)
74/* Port X Interrupt Event Register 32bit (R/W) */
75#define U300_335_PXIEV (0x08)
76/* Port X Interrupt Enable Register 32bit (R/W) */
77#define U300_335_PXIEN (0x0C)
78/* Port X Interrupt Force Register 32bit (R/W) */
79#define U300_335_PXIFR (0x10)
80/* Port X Interrupt Config Register 32bit (R/W) */
81#define U300_335_PXICR (0x14)
82/* This register layout is the same in both blocks */
83#define U300_GPIO_PXICR_ALL_IRQ_CONFIG_MASK (0x000000FFUL)
84#define U300_GPIO_PXICR_IRQ_CONFIG_MASK (0x00000001UL)
85#define U300_GPIO_PXICR_IRQ_CONFIG_FALLING_EDGE (0x00000000UL)
86#define U300_GPIO_PXICR_IRQ_CONFIG_RISING_EDGE (0x00000001UL)
87/* Port X Pull-up Enable Register 32bit (R/W) */
88#define U300_335_PXPER (0x18)
89/* This register layout is the same in both blocks */
90#define U300_GPIO_PXPER_ALL_PULL_UP_DISABLE_MASK (0x000000FFUL)
91#define U300_GPIO_PXPER_PULL_UP_DISABLE (0x00000001UL)
92/* Control Register 32bit (R/W) */
93#define U300_335_CR (0x54)
94#define U300_335_CR_BLOCK_CLOCK_ENABLE (0x00000001UL)
95
96/*
97 * Register definitions for COH 901 571 / 3 variant
98 */
99#define U300_571_PORT_STRIDE (0x30)
100/*
101 * Control Register 32bit (R/W)
102 * bit 15-9 (mask 0x0000FE00) contains the number of cores. 8*cores
103 * gives the number of GPIO pins.
104 * bit 8-2 (mask 0x000001FC) contains the core version ID.
105 */
106#define U300_571_CR (0x00)
107#define U300_571_CR_SYNC_SEL_ENABLE (0x00000002UL)
108#define U300_571_CR_BLOCK_CLKRQ_ENABLE (0x00000001UL)
109/*
110 * These registers have the same layout and function as the corresponding
111 * COH 901 335 registers, just at different offset.
112 */
113#define U300_571_PXPDIR (0x04)
114#define U300_571_PXPDOR (0x08)
115#define U300_571_PXPCR (0x0C)
116#define U300_571_PXPER (0x10)
117#define U300_571_PXIEV (0x14)
118#define U300_571_PXIEN (0x18)
119#define U300_571_PXIFR (0x1C)
120#define U300_571_PXICR (0x20)
121
122/* 8 bits per port, no version has more than 7 ports */
123#define U300_GPIO_PINS_PER_PORT 8
124#define U300_GPIO_MAX (U300_GPIO_PINS_PER_PORT * 7)
125
126struct u300_gpio {
127 struct gpio_chip chip;
128 struct list_head port_list;
129 struct clk *clk;
130 struct resource *memres;
131 void __iomem *base;
132 struct device *dev;
133 int irq_base;
134 u32 stride;
135 /* Register offsets */
136 u32 pcr;
137 u32 dor;
138 u32 dir;
139 u32 per;
140 u32 icr;
141 u32 ien;
142 u32 iev;
143};
32 144
33struct u300_gpio_port { 145struct u300_gpio_port {
34 const char *name; 146 struct list_head node;
147 struct u300_gpio *gpio;
148 char name[8];
35 int irq; 149 int irq;
36 int number; 150 int number;
151 u8 toggle_edge_mode;
37}; 152};
38 153
154/*
 155 * Macro that expands to the address of a specific register found in the "gpio"
156 * struct. It requires the struct u300_gpio *gpio variable to exist in
157 * its context. It calculates the port offset from the given pin
 158 * offset, multiplies by the port stride and adds the register offset
159 * so it provides a pointer to the desired register.
160 */
161#define U300_PIN_REG(pin, reg) \
162 (gpio->base + (pin >> 3) * gpio->stride + gpio->reg)
39 163
40static struct u300_gpio_port gpio_ports[] = { 164/*
41 { 165 * Provides a bitmask for a specific gpio pin inside an 8-bit GPIO
42 .name = "gpio0", 166 * register.
43 .number = 0, 167 */
44 }, 168#define U300_PIN_BIT(pin) \
45 { 169 (1 << (pin & 0x07))
46 .name = "gpio1",
47 .number = 1,
48 },
49 {
50 .name = "gpio2",
51 .number = 2,
52 },
53#ifdef U300_COH901571_3
54 {
55 .name = "gpio3",
56 .number = 3,
57 },
58 {
59 .name = "gpio4",
60 .number = 4,
61 },
62#ifdef CONFIG_MACH_U300_BS335
63 {
64 .name = "gpio5",
65 .number = 5,
66 },
67 {
68 .name = "gpio6",
69 .number = 6,
70 },
71#endif
72#endif
73 170
171struct u300_gpio_confdata {
172 u16 bias_mode;
173 bool output;
174 int outval;
74}; 175};
75 176
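The two helpers above condense the whole addressing scheme: a pin's port is pin / 8, each port occupies one stride of register space, and the pin's position inside its 8-bit port selects one bit. A minimal user-space sketch of that arithmetic follows; the base address is an illustrative stand-in (the stride and PCR offset happen to match the COH 901 571 defines above), not a real hardware address.

#include <stdio.h>

#define FAKE_BASE	0x40000000UL	/* stands in for gpio->base */
#define PORT_STRIDE	0x30UL		/* U300_571_PORT_STRIDE */
#define PCR_OFFSET	0x0CUL		/* U300_571_PXPCR */

/* Same math as U300_PIN_REG(pin, pcr): port index times stride plus register offset */
static unsigned long pin_reg(unsigned pin, unsigned long reg_offset)
{
	return FAKE_BASE + (pin >> 3) * PORT_STRIDE + reg_offset;
}

/* Same math as U300_PIN_BIT(pin): bit position inside the 8-bit port */
static unsigned pin_bit(unsigned pin)
{
	return 1U << (pin & 0x07);
}

int main(void)
{
	unsigned pin = 19;	/* port 2, bit 3 */

	printf("pin %u -> PCR at 0x%08lx, mask 0x%02x\n",
	       pin, pin_reg(pin, PCR_OFFSET), pin_bit(pin));
	return 0;
}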
177/* BS335 has seven ports of 8 bits each = GPIO pins 0..55 */
178#define BS335_GPIO_NUM_PORTS 7
179/* BS365 has five ports of 8 bits each = GPIO pins 0..39 */
180#define BS365_GPIO_NUM_PORTS 5
76 181
77#ifdef U300_COH901571_3 182#define U300_FLOATING_INPUT { \
183 .bias_mode = GPIO_U300_CONFIG_BIAS_FLOAT, \
184 .output = false, \
185}
78 186
79/* Default input value */ 187#define U300_PULL_UP_INPUT { \
80#define DEFAULT_OUTPUT_LOW 0 188 .bias_mode = GPIO_U300_CONFIG_BIAS_PULL_UP, \
81#define DEFAULT_OUTPUT_HIGH 1 189 .output = false, \
190}
82 191
83/* GPIO Pull-Up status */ 192#define U300_OUTPUT_LOW { \
84#define DISABLE_PULL_UP 0 193 .output = true, \
85#define ENABLE_PULL_UP 1 194 .outval = 0, \
195}
86 196
87#define GPIO_NOT_USED 0 197#define U300_OUTPUT_HIGH { \
88#define GPIO_IN 1 198 .output = true, \
89#define GPIO_OUT 2 199 .outval = 1, \
200}
90 201
91struct u300_gpio_configuration_data {
92 unsigned char pin_usage;
93 unsigned char default_output_value;
94 unsigned char pull_up;
95};
96 202
97/* Initial configuration */ 203/* Initial configuration */
98const struct u300_gpio_configuration_data 204static const struct __initdata u300_gpio_confdata
99u300_gpio_config[U300_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = { 205bs335_gpio_config[BS335_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
100#ifdef CONFIG_MACH_U300_BS335
101 /* Port 0, pins 0-7 */ 206 /* Port 0, pins 0-7 */
102 { 207 {
103 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 208 U300_FLOATING_INPUT,
104 {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP}, 209 U300_OUTPUT_HIGH,
105 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 210 U300_FLOATING_INPUT,
106 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 211 U300_OUTPUT_LOW,
107 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 212 U300_OUTPUT_LOW,
108 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 213 U300_OUTPUT_LOW,
109 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 214 U300_OUTPUT_LOW,
110 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP} 215 U300_OUTPUT_LOW,
111 }, 216 },
112 /* Port 1, pins 0-7 */ 217 /* Port 1, pins 0-7 */
113 { 218 {
114 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 219 U300_OUTPUT_LOW,
115 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 220 U300_OUTPUT_LOW,
116 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 221 U300_OUTPUT_LOW,
117 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 222 U300_PULL_UP_INPUT,
118 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 223 U300_FLOATING_INPUT,
119 {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP}, 224 U300_OUTPUT_HIGH,
120 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 225 U300_OUTPUT_LOW,
121 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP} 226 U300_OUTPUT_LOW,
122 }, 227 },
123 /* Port 2, pins 0-7 */ 228 /* Port 2, pins 0-7 */
124 { 229 {
125 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 230 U300_FLOATING_INPUT,
126 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 231 U300_FLOATING_INPUT,
127 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 232 U300_FLOATING_INPUT,
128 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 233 U300_FLOATING_INPUT,
129 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 234 U300_OUTPUT_LOW,
130 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 235 U300_PULL_UP_INPUT,
131 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 236 U300_OUTPUT_LOW,
132 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP} 237 U300_PULL_UP_INPUT,
133 }, 238 },
134 /* Port 3, pins 0-7 */ 239 /* Port 3, pins 0-7 */
135 { 240 {
136 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 241 U300_PULL_UP_INPUT,
137 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 242 U300_OUTPUT_LOW,
138 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 243 U300_FLOATING_INPUT,
139 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 244 U300_FLOATING_INPUT,
140 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 245 U300_FLOATING_INPUT,
141 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 246 U300_FLOATING_INPUT,
142 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 247 U300_FLOATING_INPUT,
143 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP} 248 U300_FLOATING_INPUT,
144 }, 249 },
145 /* Port 4, pins 0-7 */ 250 /* Port 4, pins 0-7 */
146 { 251 {
147 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 252 U300_FLOATING_INPUT,
148 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 253 U300_FLOATING_INPUT,
149 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 254 U300_FLOATING_INPUT,
150 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 255 U300_FLOATING_INPUT,
151 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 256 U300_FLOATING_INPUT,
152 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 257 U300_FLOATING_INPUT,
153 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 258 U300_FLOATING_INPUT,
154 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP} 259 U300_FLOATING_INPUT,
155 }, 260 },
156 /* Port 5, pins 0-7 */ 261 /* Port 5, pins 0-7 */
157 { 262 {
158 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 263 U300_FLOATING_INPUT,
159 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 264 U300_FLOATING_INPUT,
160 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 265 U300_FLOATING_INPUT,
161 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 266 U300_FLOATING_INPUT,
162 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 267 U300_FLOATING_INPUT,
163 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 268 U300_FLOATING_INPUT,
164 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 269 U300_FLOATING_INPUT,
165 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP} 270 U300_FLOATING_INPUT,
166 }, 271 },
167 /* Port 6, pins 0-7 */ 272 /* Port 6, pins 0-7 */
168 { 273 {
169 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 274 U300_FLOATING_INPUT,
170 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 275 U300_FLOATING_INPUT,
171 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 276 U300_FLOATING_INPUT,
172 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 277 U300_FLOATING_INPUT,
173 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 278 U300_FLOATING_INPUT,
174 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 279 U300_FLOATING_INPUT,
175 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 280 U300_FLOATING_INPUT,
176 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP} 281 U300_FLOATING_INPUT,
177 } 282 }
178#endif 283};
179 284
180#ifdef CONFIG_MACH_U300_BS365 285static const struct __initdata u300_gpio_confdata
286bs365_gpio_config[BS365_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
181 /* Port 0, pins 0-7 */ 287 /* Port 0, pins 0-7 */
182 { 288 {
183 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 289 U300_FLOATING_INPUT,
184 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 290 U300_OUTPUT_LOW,
185 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 291 U300_FLOATING_INPUT,
186 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 292 U300_OUTPUT_LOW,
187 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 293 U300_OUTPUT_LOW,
188 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 294 U300_OUTPUT_LOW,
189 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 295 U300_PULL_UP_INPUT,
190 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP} 296 U300_FLOATING_INPUT,
191 }, 297 },
192 /* Port 1, pins 0-7 */ 298 /* Port 1, pins 0-7 */
193 { 299 {
194 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 300 U300_OUTPUT_LOW,
195 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 301 U300_FLOATING_INPUT,
196 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 302 U300_OUTPUT_LOW,
197 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 303 U300_FLOATING_INPUT,
198 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 304 U300_FLOATING_INPUT,
199 {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP}, 305 U300_OUTPUT_HIGH,
200 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 306 U300_OUTPUT_LOW,
201 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP} 307 U300_OUTPUT_LOW,
202 }, 308 },
203 /* Port 2, pins 0-7 */ 309 /* Port 2, pins 0-7 */
204 { 310 {
205 {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 311 U300_FLOATING_INPUT,
206 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 312 U300_PULL_UP_INPUT,
207 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 313 U300_OUTPUT_LOW,
208 {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}, 314 U300_OUTPUT_LOW,
209 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 315 U300_PULL_UP_INPUT,
210 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 316 U300_PULL_UP_INPUT,
211 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 317 U300_PULL_UP_INPUT,
212 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP} 318 U300_PULL_UP_INPUT,
213 }, 319 },
214 /* Port 3, pins 0-7 */ 320 /* Port 3, pins 0-7 */
215 { 321 {
216 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 322 U300_PULL_UP_INPUT,
217 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 323 U300_PULL_UP_INPUT,
218 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 324 U300_PULL_UP_INPUT,
219 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 325 U300_PULL_UP_INPUT,
220 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 326 U300_PULL_UP_INPUT,
221 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 327 U300_PULL_UP_INPUT,
222 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 328 U300_PULL_UP_INPUT,
223 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP} 329 U300_PULL_UP_INPUT,
224 }, 330 },
225 /* Port 4, pins 0-7 */ 331 /* Port 4, pins 0-7 */
226 { 332 {
227 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 333 U300_PULL_UP_INPUT,
228 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 334 U300_PULL_UP_INPUT,
229 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 335 U300_PULL_UP_INPUT,
230 {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 336 U300_PULL_UP_INPUT,
 231 /* These 4 pins don't exist on DB3210 */ 337 /* These 4 pins don't exist on DB3210 */
232 {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 338 U300_OUTPUT_LOW,
233 {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 339 U300_OUTPUT_LOW,
234 {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}, 340 U300_OUTPUT_LOW,
235 {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP} 341 U300_OUTPUT_LOW,
236 } 342 }
237#endif
238}; 343};
239#endif
240 344
241 345/**
242/* No users == we can power down GPIO */ 346 * to_u300_gpio() - get the pointer to u300_gpio
243static int gpio_users; 347 * @chip: the gpio chip member of the structure u300_gpio
244
245struct gpio_struct {
246 int (*callback)(void *);
247 void *data;
248 int users;
249};
250
251static struct gpio_struct gpio_pin[U300_GPIO_MAX];
252
253/*
254 * Let drivers register callback in order to get notified when there is
255 * an interrupt on the gpio pin
256 */ 348 */
257int gpio_register_callback(unsigned gpio, int (*func)(void *arg), void *data) 349static inline struct u300_gpio *to_u300_gpio(struct gpio_chip *chip)
258{ 350{
259 if (gpio_pin[gpio].callback) 351 return container_of(chip, struct u300_gpio, chip);
260 dev_warn(gpiodev, "%s: WARNING: callback already "
261 "registered for gpio pin#%d\n", __func__, gpio);
262 gpio_pin[gpio].callback = func;
263 gpio_pin[gpio].data = data;
264
265 return 0;
266} 352}
267EXPORT_SYMBOL(gpio_register_callback);
268 353
269int gpio_unregister_callback(unsigned gpio) 354static int u300_gpio_get(struct gpio_chip *chip, unsigned offset)
270{ 355{
271 if (!gpio_pin[gpio].callback) 356 struct u300_gpio *gpio = to_u300_gpio(chip);
272 dev_warn(gpiodev, "%s: WARNING: callback already "
273 "unregistered for gpio pin#%d\n", __func__, gpio);
274 gpio_pin[gpio].callback = NULL;
275 gpio_pin[gpio].data = NULL;
276 357
277 return 0; 358 return readl(U300_PIN_REG(offset, dir)) & U300_PIN_BIT(offset);
278} 359}
279EXPORT_SYMBOL(gpio_unregister_callback);
280 360
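u300_gpio_get() is a single bit test against the 8-bit data-in register of the pin's port, and like the gpiolib convention it returns zero or nonzero rather than strictly 0/1. A stand-alone illustration with a made-up register value:

#include <stdio.h>

/* Equivalent of readl(U300_PIN_REG(offset, dir)) & U300_PIN_BIT(offset) */
static int fake_gpio_get(unsigned data_in_reg, unsigned offset)
{
	return data_in_reg & (1U << (offset & 0x07));
}

int main(void)
{
	unsigned dir = 0x2C;	/* pretend PXPDIR latched 0b00101100 */

	for (unsigned pin = 0; pin < 8; pin++)
		printf("pin %u reads %s\n", pin,
		       fake_gpio_get(dir, pin) ? "high" : "low");
	return 0;
}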
281/* Non-zero means valid */ 361static void u300_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
282int gpio_is_valid(int number)
283{ 362{
284 if (number >= 0 && 363 struct u300_gpio *gpio = to_u300_gpio(chip);
285 number < (U300_GPIO_NUM_PORTS * U300_GPIO_PINS_PER_PORT)) 364 unsigned long flags;
286 return 1; 365 u32 val;
287 return 0;
288}
289EXPORT_SYMBOL(gpio_is_valid);
290 366
291int gpio_request(unsigned gpio, const char *label) 367 local_irq_save(flags);
292{
293 if (gpio_pin[gpio].users)
294 return -EINVAL;
295 else
296 gpio_pin[gpio].users++;
297 368
298 gpio_users++; 369 val = readl(U300_PIN_REG(offset, dor));
370 if (value)
371 writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, dor));
372 else
373 writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, dor));
299 374
300 return 0; 375 local_irq_restore(flags);
301} 376}
302EXPORT_SYMBOL(gpio_request);
303 377
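The set path is a read-modify-write of the data-out register, done with interrupts disabled so the read and the write stay paired. A user-space model of the same bit manipulation, with a plain variable standing in for PXPDOR:

#include <stdio.h>

static unsigned fake_dor;	/* stands in for the port's PXPDOR register */

static void fake_gpio_set(unsigned offset, int value)
{
	unsigned bit = 1U << (offset & 0x07);

	/* In the driver this is readl()/writel() under local_irq_save() */
	if (value)
		fake_dor |= bit;
	else
		fake_dor &= ~bit;
}

int main(void)
{
	fake_gpio_set(3, 1);
	fake_gpio_set(5, 1);
	fake_gpio_set(3, 0);
	printf("PXPDOR now 0x%02x\n", fake_dor);	/* 0x20: only pin 5 high */
	return 0;
}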
304void gpio_free(unsigned gpio) 378static int u300_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
305{ 379{
306 gpio_users--; 380 struct u300_gpio *gpio = to_u300_gpio(chip);
307 gpio_pin[gpio].users--; 381 unsigned long flags;
308 if (unlikely(gpio_pin[gpio].users < 0)) { 382 u32 val;
309 dev_warn(gpiodev, "warning: gpio#%d release mismatch\n",
310 gpio);
311 gpio_pin[gpio].users = 0;
312 }
313
314 return;
315}
316EXPORT_SYMBOL(gpio_free);
317 383
318/* This returns zero or nonzero */ 384 local_irq_save(flags);
319int gpio_get_value(unsigned gpio) 385 val = readl(U300_PIN_REG(offset, pcr));
320{ 386 /* Mask out this pin, note 2 bits per setting */
321 return readl(virtbase + U300_GPIO_PXPDIR + 387 val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((offset & 0x07) << 1));
322 PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING) & (1 << (gpio & 0x07)); 388 writel(val, U300_PIN_REG(offset, pcr));
389 local_irq_restore(flags);
390 return 0;
323} 391}
324EXPORT_SYMBOL(gpio_get_value);
325 392
326/* 393static int u300_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
327 * We hope that the compiler will optimize away the unused branch 394 int value)
328 * in case "value" is a constant
329 */
330void gpio_set_value(unsigned gpio, int value)
331{ 395{
332 u32 val; 396 struct u300_gpio *gpio = to_u300_gpio(chip);
333 unsigned long flags; 397 unsigned long flags;
398 u32 oldmode;
399 u32 val;
334 400
335 local_irq_save(flags); 401 local_irq_save(flags);
336 if (value) { 402 val = readl(U300_PIN_REG(offset, pcr));
337 /* set */ 403 /*
338 val = readl(virtbase + U300_GPIO_PXPDOR + 404 * Drive mode must be set by the special mode set function, set
339 PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING) 405 * push/pull mode by default if no mode has been selected.
340 & (1 << (gpio & 0x07)); 406 */
341 writel(val | (1 << (gpio & 0x07)), virtbase + 407 oldmode = val & (U300_GPIO_PXPCR_PIN_MODE_MASK <<
342 U300_GPIO_PXPDOR + 408 ((offset & 0x07) << 1));
343 PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING); 409 /* mode = 0 means input, else some mode is already set */
344 } else { 410 if (oldmode == 0) {
345 /* clear */ 411 val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK <<
346 val = readl(virtbase + U300_GPIO_PXPDOR + 412 ((offset & 0x07) << 1));
347 PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING) 413 val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
348 & (1 << (gpio & 0x07)); 414 << ((offset & 0x07) << 1));
349 writel(val & ~(1 << (gpio & 0x07)), virtbase + 415 writel(val, U300_PIN_REG(offset, pcr));
350 U300_GPIO_PXPDOR +
351 PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
352 } 416 }
417 u300_gpio_set(chip, offset, value);
353 local_irq_restore(flags); 418 local_irq_restore(flags);
419 return 0;
354} 420}
355EXPORT_SYMBOL(gpio_set_value);
356 421
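Both direction functions manipulate the pin-config register, which packs a 2-bit mode per pin: input is 0 and the output modes are the push-pull/open-drain/open-source codes defined above, placed at bit position 2 * (pin % 8). A compilable sketch of that field update, modelling the register as a plain variable:

#include <stdio.h>

#define MODE_MASK	0x3U	/* U300_GPIO_PXPCR_PIN_MODE_MASK */
#define MODE_INPUT	0x0U
#define MODE_PUSH_PULL	0x1U

static unsigned set_pin_mode(unsigned pcr, unsigned offset, unsigned mode)
{
	unsigned shift = (offset & 0x07) << 1;	/* two bits per pin */

	pcr &= ~(MODE_MASK << shift);		/* clear the old mode */
	pcr |= mode << shift;			/* program the new one */
	return pcr;
}

int main(void)
{
	unsigned pcr = 0;

	pcr = set_pin_mode(pcr, 2, MODE_PUSH_PULL);
	pcr = set_pin_mode(pcr, 6, MODE_PUSH_PULL);
	pcr = set_pin_mode(pcr, 2, MODE_INPUT);
	printf("PXPCR = 0x%04x\n", pcr);	/* 0x1000: only pin 6 is an output */
	return 0;
}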
357int gpio_direction_input(unsigned gpio) 422static int u300_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
358{ 423{
424 struct u300_gpio *gpio = to_u300_gpio(chip);
425 int retirq = gpio->irq_base + offset;
426
427 dev_dbg(gpio->dev, "request IRQ for GPIO %d, return %d\n", offset,
428 retirq);
429 return retirq;
430}
431
432static int u300_gpio_config(struct gpio_chip *chip, unsigned offset,
433 u16 param, unsigned long *data)
434{
435 struct u300_gpio *gpio = to_u300_gpio(chip);
359 unsigned long flags; 436 unsigned long flags;
360 u32 val; 437 u32 val;
361 438
362 if (gpio > U300_GPIO_MAX)
363 return -EINVAL;
364
365 local_irq_save(flags); 439 local_irq_save(flags);
366 val = readl(virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) * 440 switch (param) {
367 U300_GPIO_PORTX_SPACING); 441 case GPIO_U300_CONFIG_BIAS_UNKNOWN:
368 /* Mask out this pin*/ 442 case GPIO_U300_CONFIG_BIAS_FLOAT:
369 val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((gpio & 0x07) << 1)); 443 val = readl(U300_PIN_REG(offset, per));
370 /* This is not needed since it sets the bits to zero.*/ 444 writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, per));
371 /* val |= (U300_GPIO_PXPCR_PIN_MODE_INPUT << (gpio*2)); */ 445 break;
372 writel(val, virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) * 446 case GPIO_U300_CONFIG_BIAS_PULL_UP:
373 U300_GPIO_PORTX_SPACING); 447 val = readl(U300_PIN_REG(offset, per));
448 writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, per));
449 break;
450 case GPIO_U300_CONFIG_DRIVE_PUSH_PULL:
451 val = readl(U300_PIN_REG(offset, pcr));
452 val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
453 << ((offset & 0x07) << 1));
454 val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
455 << ((offset & 0x07) << 1));
456 writel(val, U300_PIN_REG(offset, pcr));
457 break;
458 case GPIO_U300_CONFIG_DRIVE_OPEN_DRAIN:
459 val = readl(U300_PIN_REG(offset, pcr));
460 val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
461 << ((offset & 0x07) << 1));
462 val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN
463 << ((offset & 0x07) << 1));
464 writel(val, U300_PIN_REG(offset, pcr));
465 break;
466 case GPIO_U300_CONFIG_DRIVE_OPEN_SOURCE:
467 val = readl(U300_PIN_REG(offset, pcr));
468 val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
469 << ((offset & 0x07) << 1));
470 val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE
471 << ((offset & 0x07) << 1));
472 writel(val, U300_PIN_REG(offset, pcr));
473 break;
474 default:
475 local_irq_restore(flags);
476 dev_err(gpio->dev, "illegal configuration requested\n");
477 return -EINVAL;
478 }
374 local_irq_restore(flags); 479 local_irq_restore(flags);
375 return 0; 480 return 0;
376} 481}
377EXPORT_SYMBOL(gpio_direction_input);
378 482
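u300_gpio_config() is a small dispatch: bias requests toggle one bit in the pull-up-disable register (set = float, cleared = pull-up engaged), while drive requests rewrite the pin's 2-bit PCR mode field. A hedged stand-alone model of that dispatch; the constants are copied from the defines at the top of the file, the registers are plain variables, and the error code is simplified.

#include <stdio.h>

#define BIAS_FLOAT		0x1001
#define BIAS_PULL_UP		0x1002
#define DRIVE_PUSH_PULL		0x2000
#define DRIVE_OPEN_DRAIN	0x2001

static unsigned per, pcr;	/* stand-ins for PXPER and PXPCR */

static int fake_config(unsigned offset, unsigned param)
{
	unsigned bit = 1U << (offset & 0x07);
	unsigned shift = (offset & 0x07) << 1;

	switch (param) {
	case BIAS_FLOAT:		/* set the pull-up disable bit */
		per |= bit;
		return 0;
	case BIAS_PULL_UP:		/* clear it to engage the pull-up */
		per &= ~bit;
		return 0;
	case DRIVE_PUSH_PULL:
	case DRIVE_OPEN_DRAIN:		/* rewrite the 2-bit mode field */
		pcr &= ~(0x3U << shift);
		pcr |= ((param == DRIVE_PUSH_PULL) ? 0x1U : 0x2U) << shift;
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	fake_config(4, BIAS_PULL_UP);
	fake_config(4, DRIVE_OPEN_DRAIN);
	printf("PXPER = 0x%02x, PXPCR = 0x%04x\n", per, pcr);
	return 0;
}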
379int gpio_direction_output(unsigned gpio, int value) 483static struct gpio_chip u300_gpio_chip = {
484 .label = "u300-gpio-chip",
485 .owner = THIS_MODULE,
486 .get = u300_gpio_get,
487 .set = u300_gpio_set,
488 .direction_input = u300_gpio_direction_input,
489 .direction_output = u300_gpio_direction_output,
490 .to_irq = u300_gpio_to_irq,
491};
492
493static void u300_toggle_trigger(struct u300_gpio *gpio, unsigned offset)
380{ 494{
381 unsigned long flags;
382 u32 val; 495 u32 val;
383 496
384 if (gpio > U300_GPIO_MAX) 497 val = readl(U300_PIN_REG(offset, icr));
385 return -EINVAL; 498 /* Set mode depending on state */
386 499 if (u300_gpio_get(&gpio->chip, offset)) {
387 local_irq_save(flags); 500 /* High now, let's trigger on falling edge next then */
388 val = readl(virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) * 501 writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
389 U300_GPIO_PORTX_SPACING); 502 dev_dbg(gpio->dev, "next IRQ on falling edge on pin %d\n",
390 /* Mask out this pin */ 503 offset);
391 val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((gpio & 0x07) << 1)); 504 } else {
392 /* 505 /* Low now, let's trigger on rising edge next then */
393 * FIXME: configure for push/pull, open drain or open source per pin 506 writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
394 * in setup. The current driver will only support push/pull. 507 dev_dbg(gpio->dev, "next IRQ on rising edge on pin %d\n",
395 */ 508 offset);
396 val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL 509 }
397 << ((gpio & 0x07) << 1));
398 writel(val, virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
399 U300_GPIO_PORTX_SPACING);
400 gpio_set_value(gpio, value);
401 local_irq_restore(flags);
402 return 0;
403} 510}
404EXPORT_SYMBOL(gpio_direction_output);
405 511
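Since the interrupt config register can arm only one edge per pin, the driver emulates both-edge triggering: after every event it looks at the pin's current level and re-arms the opposite edge, which is exactly what u300_toggle_trigger() above encodes. A tiny simulation of that policy, with no hardware access:

#include <stdio.h>
#include <stdbool.h>

/* Mirror of the u300_toggle_trigger() decision: next edge from current level */
static const char *next_edge(bool pin_is_high)
{
	return pin_is_high ? "falling" : "rising";
}

int main(void)
{
	bool level = false;

	for (int i = 0; i < 4; i++) {
		printf("level %d: arm %s edge\n", level, next_edge(level));
		level = !level;	/* pretend the line toggled and an IRQ fired */
	}
	return 0;
}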
406/* 512static int u300_gpio_irq_type(struct irq_data *d, unsigned trigger)
407 * Enable an IRQ, edge is rising edge (!= 0) or falling edge (==0).
408 */
409void enable_irq_on_gpio_pin(unsigned gpio, int edge)
410{ 513{
514 struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
515 struct u300_gpio *gpio = port->gpio;
516 int offset = d->irq - gpio->irq_base;
411 u32 val; 517 u32 val;
412 unsigned long flags;
413 local_irq_save(flags);
414 518
415 val = readl(virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) * 519 if ((trigger & IRQF_TRIGGER_RISING) &&
416 U300_GPIO_PORTX_SPACING); 520 (trigger & IRQF_TRIGGER_FALLING)) {
417 val |= (1 << (gpio & 0x07)); 521 /*
418 writel(val, virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) * 522 * The GPIO block can only trigger on falling OR rising edges,
419 U300_GPIO_PORTX_SPACING); 523 * not both. So we need to toggle the mode whenever the pin
420 val = readl(virtbase + U300_GPIO_PXICR + PIN_TO_PORT(gpio) * 524 * goes from one state to the other with a special state flag
421 U300_GPIO_PORTX_SPACING); 525 */
422 if (edge) 526 dev_dbg(gpio->dev,
423 val |= (1 << (gpio & 0x07)); 527 "trigger on both rising and falling edge on pin %d\n",
424 else 528 offset);
425 val &= ~(1 << (gpio & 0x07)); 529 port->toggle_edge_mode |= U300_PIN_BIT(offset);
426 writel(val, virtbase + U300_GPIO_PXICR + PIN_TO_PORT(gpio) * 530 u300_toggle_trigger(gpio, offset);
427 U300_GPIO_PORTX_SPACING); 531 } else if (trigger & IRQF_TRIGGER_RISING) {
428 local_irq_restore(flags); 532 dev_dbg(gpio->dev, "trigger on rising edge on pin %d\n",
533 offset);
534 val = readl(U300_PIN_REG(offset, icr));
535 writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
536 port->toggle_edge_mode &= ~U300_PIN_BIT(offset);
537 } else if (trigger & IRQF_TRIGGER_FALLING) {
538 dev_dbg(gpio->dev, "trigger on falling edge on pin %d\n",
539 offset);
540 val = readl(U300_PIN_REG(offset, icr));
541 writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
542 port->toggle_edge_mode &= ~U300_PIN_BIT(offset);
543 }
544
545 return 0;
429} 546}
430EXPORT_SYMBOL(enable_irq_on_gpio_pin);
431 547
432void disable_irq_on_gpio_pin(unsigned gpio) 548static void u300_gpio_irq_enable(struct irq_data *d)
433{ 549{
550 struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
551 struct u300_gpio *gpio = port->gpio;
552 int offset = d->irq - gpio->irq_base;
434 u32 val; 553 u32 val;
435 unsigned long flags; 554 unsigned long flags;
436 555
437 local_irq_save(flags); 556 local_irq_save(flags);
438 val = readl(virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) * 557 val = readl(U300_PIN_REG(offset, ien));
439 U300_GPIO_PORTX_SPACING); 558 writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
440 val &= ~(1 << (gpio & 0x07));
441 writel(val, virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
442 U300_GPIO_PORTX_SPACING);
443 local_irq_restore(flags); 559 local_irq_restore(flags);
444} 560}
445EXPORT_SYMBOL(disable_irq_on_gpio_pin);
446 561
447/* Enable (value == 0) or disable (value == 1) internal pullup */ 562static void u300_gpio_irq_disable(struct irq_data *d)
448void gpio_pullup(unsigned gpio, int value)
449{ 563{
564 struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
565 struct u300_gpio *gpio = port->gpio;
566 int offset = d->irq - gpio->irq_base;
450 u32 val; 567 u32 val;
451 unsigned long flags; 568 unsigned long flags;
452 569
453 local_irq_save(flags); 570 local_irq_save(flags);
454 if (value) { 571 val = readl(U300_PIN_REG(offset, ien));
455 val = readl(virtbase + U300_GPIO_PXPER + PIN_TO_PORT(gpio) * 572 writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
456 U300_GPIO_PORTX_SPACING);
457 writel(val | (1 << (gpio & 0x07)), virtbase + U300_GPIO_PXPER +
458 PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
459 } else {
460 val = readl(virtbase + U300_GPIO_PXPER + PIN_TO_PORT(gpio) *
461 U300_GPIO_PORTX_SPACING);
462 writel(val & ~(1 << (gpio & 0x07)), virtbase + U300_GPIO_PXPER +
463 PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
464 }
465 local_irq_restore(flags); 573 local_irq_restore(flags);
466} 574}
467EXPORT_SYMBOL(gpio_pullup);
468 575
469static irqreturn_t gpio_irq_handler(int irq, void *dev_id) 576static struct irq_chip u300_gpio_irqchip = {
577 .name = "u300-gpio-irqchip",
578 .irq_enable = u300_gpio_irq_enable,
579 .irq_disable = u300_gpio_irq_disable,
580 .irq_set_type = u300_gpio_irq_type,
581
582};
583
584static void u300_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
470{ 585{
471 struct u300_gpio_port *port = dev_id; 586 struct u300_gpio_port *port = irq_get_handler_data(irq);
472 u32 val; 587 struct u300_gpio *gpio = port->gpio;
473 int pin; 588 int pinoffset = port->number << 3; /* first pin of this port */
589 unsigned long val;
474 590
591 desc->irq_data.chip->irq_ack(&desc->irq_data);
475 /* Read event register */ 592 /* Read event register */
476 val = readl(virtbase + U300_GPIO_PXIEV + port->number * 593 val = readl(U300_PIN_REG(pinoffset, iev));
477 U300_GPIO_PORTX_SPACING);
478 /* Mask with enable register */
479 val &= readl(virtbase + U300_GPIO_PXIEV + port->number *
480 U300_GPIO_PORTX_SPACING);
481 /* Mask relevant bits */ 594 /* Mask relevant bits */
482 val &= U300_GPIO_PXIEV_ALL_IRQ_EVENT_MASK; 595 val &= 0xFFU; /* 8 bits per port */
483 /* ACK IRQ (clear event) */ 596 /* ACK IRQ (clear event) */
484 writel(val, virtbase + U300_GPIO_PXIEV + port->number * 597 writel(val, U300_PIN_REG(pinoffset, iev));
485 U300_GPIO_PORTX_SPACING); 598
486 /* Print message */ 599 /* Call IRQ handler */
487 while (val != 0) { 600 if (val != 0) {
488 unsigned gpio; 601 int irqoffset;
489 602
490 pin = __ffs(val); 603 for_each_set_bit(irqoffset, &val, U300_GPIO_PINS_PER_PORT) {
491 /* mask off this pin */ 604 int pin_irq = gpio->irq_base + (port->number << 3)
492 val &= ~(1 << pin); 605 + irqoffset;
493 gpio = (port->number << 3) + pin; 606 int offset = pinoffset + irqoffset;
494 607
495 if (gpio_pin[gpio].callback) 608 dev_dbg(gpio->dev, "GPIO IRQ %d on pin %d\n",
496 (void)gpio_pin[gpio].callback(gpio_pin[gpio].data); 609 pin_irq, offset);
497 else 610 generic_handle_irq(pin_irq);
498 dev_dbg(gpiodev, "stray GPIO IRQ on line %d\n", 611 /*
499 gpio); 612 * Triggering IRQ on both rising and falling edge
 613 * has to be emulated by toggling the trigger
614 */
615 if (port->toggle_edge_mode & U300_PIN_BIT(offset))
616 u300_toggle_trigger(gpio, offset);
617 }
500 } 618 }
501 return IRQ_HANDLED; 619
620 desc->irq_data.chip->irq_unmask(&desc->irq_data);
502} 621}
503 622
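The chained handler above reads the port's 8-bit event register, acks it, and fans each set bit out to a per-pin Linux IRQ computed as irq_base + port * 8 + bit. A self-contained sketch of that fan-out loop; irq_base, the port number and the event value are made up.

#include <stdio.h>

int main(void)
{
	unsigned irq_base = 96;		/* assumed plat->gpio_irq_base */
	unsigned port = 2;		/* third port, pins 16..23 */
	unsigned events = 0x41;		/* pretend PXIEV reported bits 0 and 6 */

	for (unsigned bit = 0; bit < 8; bit++) {
		if (!(events & (1U << bit)))
			continue;

		unsigned pin = (port << 3) + bit;
		unsigned pin_irq = irq_base + pin;

		printf("GPIO pin %u -> generic_handle_irq(%u)\n", pin, pin_irq);
	}
	return 0;
}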
504static void gpio_set_initial_values(void) 623static void __init u300_gpio_init_pin(struct u300_gpio *gpio,
624 int offset,
625 const struct u300_gpio_confdata *conf)
505{ 626{
506#ifdef U300_COH901571_3 627 /* Set mode: input or output */
507 int i, j; 628 if (conf->output) {
508 unsigned long flags; 629 u300_gpio_direction_output(&gpio->chip, offset, conf->outval);
509 u32 val;
510 630
511 /* Write default values to all pins */ 631 /* Deactivate bias mode for output */
512 for (i = 0; i < U300_GPIO_NUM_PORTS; i++) { 632 u300_gpio_config(&gpio->chip, offset,
513 val = 0; 633 GPIO_U300_CONFIG_BIAS_FLOAT,
514 for (j = 0; j < 8; j++) 634 NULL);
515 val |= (u32) (u300_gpio_config[i][j].default_output_value != DEFAULT_OUTPUT_LOW) << j; 635
516 local_irq_save(flags); 636 /* Set drive mode for output */
517 writel(val, virtbase + U300_GPIO_PXPDOR + i * U300_GPIO_PORTX_SPACING); 637 u300_gpio_config(&gpio->chip, offset,
518 local_irq_restore(flags); 638 GPIO_U300_CONFIG_DRIVE_PUSH_PULL, NULL);
639
640 dev_dbg(gpio->dev, "set up pin %d as output, value: %d\n",
641 offset, conf->outval);
642 } else {
643 u300_gpio_direction_input(&gpio->chip, offset);
644
645 /* Always set output low on input pins */
646 u300_gpio_set(&gpio->chip, offset, 0);
647
648 /* Set bias mode for input */
649 u300_gpio_config(&gpio->chip, offset, conf->bias_mode, NULL);
650
651 dev_dbg(gpio->dev, "set up pin %d as input, bias: %04x\n",
652 offset, conf->bias_mode);
519 } 653 }
654}
520 655
521 /* 656static void __init u300_gpio_init_coh901571(struct u300_gpio *gpio,
522 * Put all pins that are set to either 'GPIO_OUT' or 'GPIO_NOT_USED' 657 struct u300_gpio_platform *plat)
523 * to output and 'GPIO_IN' to input for each port. And initialize 658{
524 * default value on outputs. 659 int i, j;
525 */ 660
526 for (i = 0; i < U300_GPIO_NUM_PORTS; i++) { 661 /* Write default config and values to all pins */
527 for (j = 0; j < U300_GPIO_PINS_PER_PORT; j++) { 662 for (i = 0; i < plat->ports; i++) {
528 local_irq_save(flags); 663 for (j = 0; j < 8; j++) {
529 val = readl(virtbase + U300_GPIO_PXPCR + 664 const struct u300_gpio_confdata *conf;
530 i * U300_GPIO_PORTX_SPACING); 665 int offset = (i*8) + j;
531 /* Mask out this pin */ 666
532 val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << (j << 1)); 667 if (plat->variant == U300_GPIO_COH901571_3_BS335)
533 668 conf = &bs335_gpio_config[i][j];
534 if (u300_gpio_config[i][j].pin_usage != GPIO_IN) 669 else if (plat->variant == U300_GPIO_COH901571_3_BS365)
535 val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL << (j << 1)); 670 conf = &bs365_gpio_config[i][j];
536 writel(val, virtbase + U300_GPIO_PXPCR + 671 else
537 i * U300_GPIO_PORTX_SPACING); 672 break;
538 local_irq_restore(flags); 673
674 u300_gpio_init_pin(gpio, offset, conf);
539 } 675 }
540 } 676 }
677}
541 678
542 /* Enable or disable the internal pull-ups in the GPIO ASIC block */ 679static inline void u300_gpio_free_ports(struct u300_gpio *gpio)
543 for (i = 0; i < U300_GPIO_MAX; i++) { 680{
544 val = 0; 681 struct u300_gpio_port *port;
545 for (j = 0; j < 8; j++) 682 struct list_head *p, *n;
546 val |= (u32)((u300_gpio_config[i][j].pull_up == DISABLE_PULL_UP) << j); 683
547 local_irq_save(flags); 684 list_for_each_safe(p, n, &gpio->port_list) {
548 writel(val, virtbase + U300_GPIO_PXPER + i * U300_GPIO_PORTX_SPACING); 685 port = list_entry(p, struct u300_gpio_port, node);
549 local_irq_restore(flags); 686 list_del(&port->node);
687 free_irq(port->irq, port);
688 kfree(port);
550 } 689 }
551#endif
552} 690}
553 691
554static int __init gpio_probe(struct platform_device *pdev) 692static int __init u300_gpio_probe(struct platform_device *pdev)
555{ 693{
556 u32 val; 694 struct u300_gpio_platform *plat = dev_get_platdata(&pdev->dev);
695 struct u300_gpio *gpio;
557 int err = 0; 696 int err = 0;
697 int portno;
698 u32 val;
699 u32 ifr;
558 int i; 700 int i;
559 int num_irqs;
560 701
561 gpiodev = &pdev->dev; 702 gpio = kzalloc(sizeof(struct u300_gpio), GFP_KERNEL);
562 memset(gpio_pin, 0, sizeof(gpio_pin)); 703 if (gpio == NULL) {
704 dev_err(&pdev->dev, "failed to allocate memory\n");
705 return -ENOMEM;
706 }
707
708 gpio->chip = u300_gpio_chip;
709 gpio->chip.ngpio = plat->ports * U300_GPIO_PINS_PER_PORT;
710 gpio->irq_base = plat->gpio_irq_base;
711 gpio->chip.dev = &pdev->dev;
712 gpio->chip.base = plat->gpio_base;
713 gpio->dev = &pdev->dev;
563 714
564 /* Get GPIO clock */ 715 /* Get GPIO clock */
565 clk = clk_get(&pdev->dev, NULL); 716 gpio->clk = clk_get(gpio->dev, NULL);
566 if (IS_ERR(clk)) { 717 if (IS_ERR(gpio->clk)) {
567 err = PTR_ERR(clk); 718 err = PTR_ERR(gpio->clk);
568 dev_err(gpiodev, "could not get GPIO clock\n"); 719 dev_err(gpio->dev, "could not get GPIO clock\n");
569 goto err_no_clk; 720 goto err_no_clk;
570 } 721 }
571 err = clk_enable(clk); 722 err = clk_enable(gpio->clk);
572 if (err) { 723 if (err) {
573 dev_err(gpiodev, "could not enable GPIO clock\n"); 724 dev_err(gpio->dev, "could not enable GPIO clock\n");
574 goto err_no_clk_enable; 725 goto err_no_clk_enable;
575 } 726 }
576 727
577 memres = platform_get_resource(pdev, IORESOURCE_MEM, 0); 728 gpio->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
578 if (!memres) 729 if (!gpio->memres) {
730 dev_err(gpio->dev, "could not get GPIO memory resource\n");
731 err = -ENODEV;
579 goto err_no_resource; 732 goto err_no_resource;
733 }
580 734
581 if (!request_mem_region(memres->start, resource_size(memres), 735 if (!request_mem_region(gpio->memres->start,
736 resource_size(gpio->memres),
582 "GPIO Controller")) { 737 "GPIO Controller")) {
583 err = -ENODEV; 738 err = -ENODEV;
584 goto err_no_ioregion; 739 goto err_no_ioregion;
585 } 740 }
586 741
587 virtbase = ioremap(memres->start, resource_size(memres)); 742 gpio->base = ioremap(gpio->memres->start, resource_size(gpio->memres));
588 if (!virtbase) { 743 if (!gpio->base) {
589 err = -ENOMEM; 744 err = -ENOMEM;
590 goto err_no_ioremap; 745 goto err_no_ioremap;
591 } 746 }
592 dev_info(gpiodev, "remapped 0x%08x to %p\n", 747
593 memres->start, virtbase); 748 if (plat->variant == U300_GPIO_COH901335) {
594 749 dev_info(gpio->dev,
595#ifdef U300_COH901335 750 "initializing GPIO Controller COH 901 335\n");
596 dev_info(gpiodev, "initializing GPIO Controller COH 901 335\n"); 751 gpio->stride = U300_335_PORT_STRIDE;
597 /* Turn on the GPIO block */ 752 gpio->pcr = U300_335_PXPCR;
598 writel(U300_GPIO_CR_BLOCK_CLOCK_ENABLE, virtbase + U300_GPIO_CR); 753 gpio->dor = U300_335_PXPDOR;
599#endif 754 gpio->dir = U300_335_PXPDIR;
600 755 gpio->per = U300_335_PXPER;
601#ifdef U300_COH901571_3 756 gpio->icr = U300_335_PXICR;
602 dev_info(gpiodev, "initializing GPIO Controller COH 901 571/3\n"); 757 gpio->ien = U300_335_PXIEN;
603 val = readl(virtbase + U300_GPIO_CR); 758 gpio->iev = U300_335_PXIEV;
604 dev_info(gpiodev, "COH901571/3 block version: %d, " \ 759 ifr = U300_335_PXIFR;
605 "number of cores: %d\n", 760
606 ((val & 0x0000FE00) >> 9), 761 /* Turn on the GPIO block */
607 ((val & 0x000001FC) >> 2)); 762 writel(U300_335_CR_BLOCK_CLOCK_ENABLE,
608 writel(U300_GPIO_CR_BLOCK_CLKRQ_ENABLE, virtbase + U300_GPIO_CR); 763 gpio->base + U300_335_CR);
609#endif 764 } else if (plat->variant == U300_GPIO_COH901571_3_BS335 ||
610 765 plat->variant == U300_GPIO_COH901571_3_BS365) {
611 gpio_set_initial_values(); 766 dev_info(gpio->dev,
612 767 "initializing GPIO Controller COH 901 571/3\n");
613 for (num_irqs = 0 ; num_irqs < U300_GPIO_NUM_PORTS; num_irqs++) { 768 gpio->stride = U300_571_PORT_STRIDE;
614 769 gpio->pcr = U300_571_PXPCR;
615 gpio_ports[num_irqs].irq = 770 gpio->dor = U300_571_PXPDOR;
616 platform_get_irq_byname(pdev, 771 gpio->dir = U300_571_PXPDIR;
617 gpio_ports[num_irqs].name); 772 gpio->per = U300_571_PXPER;
618 773 gpio->icr = U300_571_PXICR;
619 err = request_irq(gpio_ports[num_irqs].irq, 774 gpio->ien = U300_571_PXIEN;
620 gpio_irq_handler, IRQF_DISABLED, 775 gpio->iev = U300_571_PXIEV;
621 gpio_ports[num_irqs].name, 776 ifr = U300_571_PXIFR;
622 &gpio_ports[num_irqs]); 777
623 if (err) { 778 val = readl(gpio->base + U300_571_CR);
624 dev_err(gpiodev, "cannot allocate IRQ for %s!\n", 779 dev_info(gpio->dev, "COH901571/3 block version: %d, " \
625 gpio_ports[num_irqs].name); 780 "number of cores: %d totalling %d pins\n",
626 goto err_no_irq; 781 ((val & 0x000001FC) >> 2),
782 ((val & 0x0000FE00) >> 9),
783 ((val & 0x0000FE00) >> 9) * 8);
784 writel(U300_571_CR_BLOCK_CLKRQ_ENABLE,
785 gpio->base + U300_571_CR);
786 u300_gpio_init_coh901571(gpio, plat);
787 } else {
788 dev_err(gpio->dev, "unknown block variant\n");
789 err = -ENODEV;
790 goto err_unknown_variant;
791 }
792
793 /* Add each port with its IRQ separately */
794 INIT_LIST_HEAD(&gpio->port_list);
795 for (portno = 0 ; portno < plat->ports; portno++) {
796 struct u300_gpio_port *port =
797 kmalloc(sizeof(struct u300_gpio_port), GFP_KERNEL);
798
799 if (!port) {
800 dev_err(gpio->dev, "out of memory\n");
801 err = -ENOMEM;
802 goto err_no_port;
627 } 803 }
628 /* Turns off PortX_irq_force */ 804
629 writel(0x0, virtbase + U300_GPIO_PXIFR + 805 snprintf(port->name, 8, "gpio%d", portno);
630 num_irqs * U300_GPIO_PORTX_SPACING); 806 port->number = portno;
807 port->gpio = gpio;
808
809 port->irq = platform_get_irq_byname(pdev,
810 port->name);
811
812 dev_dbg(gpio->dev, "register IRQ %d for %s\n", port->irq,
813 port->name);
814
815 irq_set_chained_handler(port->irq, u300_gpio_irq_handler);
816 irq_set_handler_data(port->irq, port);
817
818 /* For each GPIO pin set the unique IRQ handler */
819 for (i = 0; i < U300_GPIO_PINS_PER_PORT; i++) {
820 int irqno = gpio->irq_base + (portno << 3) + i;
821
822 dev_dbg(gpio->dev, "handler for IRQ %d on %s\n",
823 irqno, port->name);
824 irq_set_chip_and_handler(irqno, &u300_gpio_irqchip,
825 handle_simple_irq);
826 set_irq_flags(irqno, IRQF_VALID);
827 irq_set_chip_data(irqno, port);
828 }
829
830 /* Turns off irq force (test register) for this port */
831 writel(0x0, gpio->base + portno * gpio->stride + ifr);
832
833 list_add_tail(&port->node, &gpio->port_list);
631 } 834 }
835 dev_dbg(gpio->dev, "initialized %d GPIO ports\n", portno);
836
837 err = gpiochip_add(&gpio->chip);
838 if (err) {
839 dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
840 goto err_no_chip;
841 }
842
843 platform_set_drvdata(pdev, gpio);
632 844
633 return 0; 845 return 0;
634 846
635 err_no_irq: 847err_no_chip:
636 for (i = 0; i < num_irqs; i++) 848err_no_port:
637 free_irq(gpio_ports[i].irq, &gpio_ports[i]); 849 u300_gpio_free_ports(gpio);
638 iounmap(virtbase); 850err_unknown_variant:
639 err_no_ioremap: 851 iounmap(gpio->base);
640 release_mem_region(memres->start, resource_size(memres)); 852err_no_ioremap:
641 err_no_ioregion: 853 release_mem_region(gpio->memres->start, resource_size(gpio->memres));
642 err_no_resource: 854err_no_ioregion:
643 clk_disable(clk); 855err_no_resource:
644 err_no_clk_enable: 856 clk_disable(gpio->clk);
645 clk_put(clk); 857err_no_clk_enable:
646 err_no_clk: 858 clk_put(gpio->clk);
647 dev_info(gpiodev, "module ERROR:%d\n", err); 859err_no_clk:
860 kfree(gpio);
861 dev_info(&pdev->dev, "module ERROR:%d\n", err);
648 return err; 862 return err;
649} 863}
650 864
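The probe error path follows the usual goto-ladder convention: resources are released in exactly the reverse order they were acquired, and each label is placed so a failure skips the releases for steps that never succeeded. A hedged skeleton of the pattern with placeholder acquire/release functions (not the driver's real API):

#include <stdio.h>

/* Placeholder acquire/release steps standing in for clk_get/ioremap/etc. */
static int  get_a(void) { puts("get A"); return 0; }
static void put_a(void) { puts("put A"); }
static int  get_b(void) { puts("get B"); return -1; }	/* pretend step B fails */

static int probe(void)
{
	int err;

	err = get_a();
	if (err)
		goto err_no_a;
	err = get_b();
	if (err)
		goto err_no_b;
	return 0;

err_no_b:
	put_a();	/* B never succeeded, so only A is rolled back */
err_no_a:
	return err;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}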
651static int __exit gpio_remove(struct platform_device *pdev) 865static int __exit u300_gpio_remove(struct platform_device *pdev)
652{ 866{
653 int i; 867 struct u300_gpio_platform *plat = dev_get_platdata(&pdev->dev);
868 struct u300_gpio *gpio = platform_get_drvdata(pdev);
869 int err;
654 870
655 /* Turn off the GPIO block */ 871 /* Turn off the GPIO block */
656 writel(0x00000000U, virtbase + U300_GPIO_CR); 872 if (plat->variant == U300_GPIO_COH901335)
657 for (i = 0 ; i < U300_GPIO_NUM_PORTS; i++) 873 writel(0x00000000U, gpio->base + U300_335_CR);
658 free_irq(gpio_ports[i].irq, &gpio_ports[i]); 874 if (plat->variant == U300_GPIO_COH901571_3_BS335 ||
659 iounmap(virtbase); 875 plat->variant == U300_GPIO_COH901571_3_BS365)
660 release_mem_region(memres->start, resource_size(memres)); 876 writel(0x00000000U, gpio->base + U300_571_CR);
661 clk_disable(clk); 877
662 clk_put(clk); 878 err = gpiochip_remove(&gpio->chip);
879 if (err < 0) {
880 dev_err(gpio->dev, "unable to remove gpiochip: %d\n", err);
881 return err;
882 }
883 u300_gpio_free_ports(gpio);
884 iounmap(gpio->base);
885 release_mem_region(gpio->memres->start,
886 resource_size(gpio->memres));
887 clk_disable(gpio->clk);
888 clk_put(gpio->clk);
889 platform_set_drvdata(pdev, NULL);
890 kfree(gpio);
663 return 0; 891 return 0;
664} 892}
665 893
666static struct platform_driver gpio_driver = { 894static struct platform_driver u300_gpio_driver = {
667 .driver = { 895 .driver = {
668 .name = "u300-gpio", 896 .name = "u300-gpio",
669 }, 897 },
670 .remove = __exit_p(gpio_remove), 898 .remove = __exit_p(u300_gpio_remove),
671}; 899};
672 900
673 901
674static int __init u300_gpio_init(void) 902static int __init u300_gpio_init(void)
675{ 903{
676 return platform_driver_probe(&gpio_driver, gpio_probe); 904 return platform_driver_probe(&u300_gpio_driver, u300_gpio_probe);
677} 905}
678 906
679static void __exit u300_gpio_exit(void) 907static void __exit u300_gpio_exit(void)
680{ 908{
681 platform_driver_unregister(&gpio_driver); 909 platform_driver_unregister(&u300_gpio_driver);
682} 910}
683 911
684arch_initcall(u300_gpio_init); 912arch_initcall(u300_gpio_init);
685module_exit(u300_gpio_exit); 913module_exit(u300_gpio_exit);
686 914
687MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>"); 915MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
688 916MODULE_DESCRIPTION("ST-Ericsson AB COH 901 335/COH 901 571/3 GPIO driver");
689#ifdef U300_COH901571_3
690MODULE_DESCRIPTION("ST-Ericsson AB COH 901 571/3 GPIO driver");
691#endif
692
693#ifdef U300_COH901335
694MODULE_DESCRIPTION("ST-Ericsson AB COH 901 335 GPIO driver");
695#endif
696
697MODULE_LICENSE("GPL"); 917MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 82db18506662..fe738f05309b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -499,6 +499,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
499 mutex_lock(&dev->mode_config.mutex); 499 mutex_lock(&dev->mode_config.mutex);
500 drm_mode_object_put(dev, &connector->base); 500 drm_mode_object_put(dev, &connector->base);
501 list_del(&connector->head); 501 list_del(&connector->head);
502 dev->mode_config.num_connector--;
502 mutex_unlock(&dev->mode_config.mutex); 503 mutex_unlock(&dev->mode_config.mutex);
503} 504}
504EXPORT_SYMBOL(drm_connector_cleanup); 505EXPORT_SYMBOL(drm_connector_cleanup);
@@ -529,6 +530,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
529 mutex_lock(&dev->mode_config.mutex); 530 mutex_lock(&dev->mode_config.mutex);
530 drm_mode_object_put(dev, &encoder->base); 531 drm_mode_object_put(dev, &encoder->base);
531 list_del(&encoder->head); 532 list_del(&encoder->head);
533 dev->mode_config.num_encoder--;
532 mutex_unlock(&dev->mode_config.mutex); 534 mutex_unlock(&dev->mode_config.mutex);
533} 535}
534EXPORT_SYMBOL(drm_encoder_cleanup); 536EXPORT_SYMBOL(drm_encoder_cleanup);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 802b61ac3139..f7c6854eb4dd 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -256,7 +256,6 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
256{ 256{
257 printk(KERN_ERR "panic occurred, switching back to text console\n"); 257 printk(KERN_ERR "panic occurred, switching back to text console\n");
258 return drm_fb_helper_force_kernel_mode(); 258 return drm_fb_helper_force_kernel_mode();
259 return 0;
260} 259}
261EXPORT_SYMBOL(drm_fb_helper_panic); 260EXPORT_SYMBOL(drm_fb_helper_panic);
262 261
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a8ab6263e0d7..3c395a59da35 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
499 seq_printf(m, "Interrupts received: %d\n", 499 seq_printf(m, "Interrupts received: %d\n",
500 atomic_read(&dev_priv->irq_received)); 500 atomic_read(&dev_priv->irq_received));
501 for (i = 0; i < I915_NUM_RINGS; i++) { 501 for (i = 0; i < I915_NUM_RINGS; i++) {
502 if (IS_GEN6(dev)) { 502 if (IS_GEN6(dev) || IS_GEN7(dev)) {
503 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", 503 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
504 dev_priv->ring[i].name, 504 dev_priv->ring[i].name,
505 I915_READ_IMR(&dev_priv->ring[i])); 505 I915_READ_IMR(&dev_priv->ring[i]));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ce045a8cf82c..f07e4252b708 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
67MODULE_PARM_DESC(i915_enable_rc6, 67MODULE_PARM_DESC(i915_enable_rc6,
68 "Enable power-saving render C-state 6 (default: true)"); 68 "Enable power-saving render C-state 6 (default: true)");
69 69
70unsigned int i915_enable_fbc __read_mostly = 1; 70unsigned int i915_enable_fbc __read_mostly = -1;
71module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); 71module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
72MODULE_PARM_DESC(i915_enable_fbc, 72MODULE_PARM_DESC(i915_enable_fbc,
73 "Enable frame buffer compression for power savings " 73 "Enable frame buffer compression for power savings "
74 "(default: false)"); 74 "(default: -1 (use per-chip default))");
75 75
76unsigned int i915_lvds_downclock __read_mostly = 0; 76unsigned int i915_lvds_downclock __read_mostly = 0;
77module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 77module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index feb4f164fd1b..7916bd97d5c1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -36,6 +36,7 @@
36#include <linux/io-mapping.h> 36#include <linux/io-mapping.h>
37#include <linux/i2c.h> 37#include <linux/i2c.h>
38#include <drm/intel-gtt.h> 38#include <drm/intel-gtt.h>
39#include <linux/backlight.h>
39 40
40/* General customization: 41/* General customization:
41 */ 42 */
@@ -690,6 +691,7 @@ typedef struct drm_i915_private {
690 int child_dev_num; 691 int child_dev_num;
691 struct child_device_config *child_dev; 692 struct child_device_config *child_dev;
692 struct drm_connector *int_lvds_connector; 693 struct drm_connector *int_lvds_connector;
694 struct drm_connector *int_edp_connector;
693 695
694 bool mchbar_need_disable; 696 bool mchbar_need_disable;
695 697
@@ -723,6 +725,8 @@ typedef struct drm_i915_private {
723 /* list of fbdev register on this device */ 725 /* list of fbdev register on this device */
724 struct intel_fbdev *fbdev; 726 struct intel_fbdev *fbdev;
725 727
728 struct backlight_device *backlight;
729
726 struct drm_property *broadcast_rgb_property; 730 struct drm_property *broadcast_rgb_property;
727 struct drm_property *force_audio_property; 731 struct drm_property *force_audio_property;
728 732
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 02f96fd0d52d..9cbb0cd8f46a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2058,8 +2058,10 @@ void intel_irq_init(struct drm_device *dev)
2058 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2058 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2059 } 2059 }
2060 2060
2061 2061 if (drm_core_check_feature(dev, DRIVER_MODESET))
2062 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 2062 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
2063 else
2064 dev->driver->get_vblank_timestamp = NULL;
2063 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 2065 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2064 2066
2065 if (IS_IVYBRIDGE(dev)) { 2067 if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d1331f771e2f..542453f7498c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -375,6 +375,7 @@
375# define MI_FLUSH_ENABLE (1 << 11) 375# define MI_FLUSH_ENABLE (1 << 11)
376 376
377#define GFX_MODE 0x02520 377#define GFX_MODE 0x02520
378#define GFX_MODE_GEN7 0x0229c
378#define GFX_RUN_LIST_ENABLE (1<<15) 379#define GFX_RUN_LIST_ENABLE (1<<15)
379#define GFX_TLB_INVALIDATE_ALWAYS (1<<13) 380#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
380#define GFX_SURFACE_FAULT_ENABLE (1<<12) 381#define GFX_SURFACE_FAULT_ENABLE (1<<12)
@@ -382,6 +383,9 @@
382#define GFX_PSMI_GRANULARITY (1<<10) 383#define GFX_PSMI_GRANULARITY (1<<10)
383#define GFX_PPGTT_ENABLE (1<<9) 384#define GFX_PPGTT_ENABLE (1<<9)
384 385
386#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
387#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
388
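GFX_MODE_ENABLE()/GFX_MODE_DISABLE() build values for what is commonly a masked-write register: the upper 16 bits say which bits the write may touch and the lower 16 bits carry the new values, so unrelated bits keep their state. That reading of the hardware is an assumption here; the sketch below only models how such a write would combine with an existing register value.

#include <stdio.h>

#define MODE_ENABLE(bit)	(((bit) << 16) | (bit))
#define MODE_DISABLE(bit)	(((bit) << 16) | (0))

/* Assumed masked-write rule: only bits named in the upper half are updated */
static unsigned apply_masked_write(unsigned reg, unsigned word)
{
	unsigned mask = word >> 16;
	unsigned val = word & 0xffff;

	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned reg = 0x0204;	/* pre-existing bits 2 and 9 */

	reg = apply_masked_write(reg, MODE_ENABLE(1 << 9));	/* touches bit 9 only */
	reg = apply_masked_write(reg, MODE_DISABLE(1 << 2));	/* clears bit 2 only */
	printf("reg = 0x%04x\n", reg);	/* 0x0200 */
	return 0;
}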
385#define SCPD0 0x0209c /* 915+ only */ 389#define SCPD0 0x0209c /* 915+ only */
386#define IER 0x020a0 390#define IER 0x020a0
387#define IIR 0x020a4 391#define IIR 0x020a4
@@ -1318,6 +1322,7 @@
1318#define ADPA_PIPE_SELECT_MASK (1<<30) 1322#define ADPA_PIPE_SELECT_MASK (1<<30)
1319#define ADPA_PIPE_A_SELECT 0 1323#define ADPA_PIPE_A_SELECT 0
1320#define ADPA_PIPE_B_SELECT (1<<30) 1324#define ADPA_PIPE_B_SELECT (1<<30)
1325#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
1321#define ADPA_USE_VGA_HVPOLARITY (1<<15) 1326#define ADPA_USE_VGA_HVPOLARITY (1<<15)
1322#define ADPA_SETS_HVPOLARITY 0 1327#define ADPA_SETS_HVPOLARITY 0
1323#define ADPA_VSYNC_CNTL_DISABLE (1<<11) 1328#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1460,6 +1465,7 @@
1460/* Selects pipe B for LVDS data. Must be set on pre-965. */ 1465/* Selects pipe B for LVDS data. Must be set on pre-965. */
1461#define LVDS_PIPEB_SELECT (1 << 30) 1466#define LVDS_PIPEB_SELECT (1 << 30)
1462#define LVDS_PIPE_MASK (1 << 30) 1467#define LVDS_PIPE_MASK (1 << 30)
1468#define LVDS_PIPE(pipe) ((pipe) << 30)
1463/* LVDS dithering flag on 965/g4x platform */ 1469/* LVDS dithering flag on 965/g4x platform */
1464#define LVDS_ENABLE_DITHER (1 << 25) 1470#define LVDS_ENABLE_DITHER (1 << 25)
1465/* LVDS sync polarity flags. Set to invert (i.e. negative) */ 1471/* LVDS sync polarity flags. Set to invert (i.e. negative) */
@@ -1499,9 +1505,6 @@
1499#define LVDS_B0B3_POWER_DOWN (0 << 2) 1505#define LVDS_B0B3_POWER_DOWN (0 << 2)
1500#define LVDS_B0B3_POWER_UP (3 << 2) 1506#define LVDS_B0B3_POWER_UP (3 << 2)
1501 1507
1502#define LVDS_PIPE_ENABLED(V, P) \
1503 (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN))
1504
1505/* Video Data Island Packet control */ 1508/* Video Data Island Packet control */
1506#define VIDEO_DIP_DATA 0x61178 1509#define VIDEO_DIP_DATA 0x61178
1507#define VIDEO_DIP_CTL 0x61170 1510#define VIDEO_DIP_CTL 0x61170
@@ -3256,14 +3259,12 @@
3256#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) 3259#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
3257#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) 3260#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
3258 3261
3259#define ADPA_PIPE_ENABLED(V, P) \
3260 (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE))
3261
3262/* or SDVOB */ 3262/* or SDVOB */
3263#define HDMIB 0xe1140 3263#define HDMIB 0xe1140
3264#define PORT_ENABLE (1 << 31) 3264#define PORT_ENABLE (1 << 31)
3265#define TRANSCODER_A (0) 3265#define TRANSCODER_A (0)
3266#define TRANSCODER_B (1 << 30) 3266#define TRANSCODER_B (1 << 30)
3267#define TRANSCODER(pipe) ((pipe) << 30)
3267#define TRANSCODER_MASK (1 << 30) 3268#define TRANSCODER_MASK (1 << 30)
3268#define COLOR_FORMAT_8bpc (0) 3269#define COLOR_FORMAT_8bpc (0)
3269#define COLOR_FORMAT_12bpc (3 << 26) 3270#define COLOR_FORMAT_12bpc (3 << 26)
@@ -3280,9 +3281,6 @@
3280#define HSYNC_ACTIVE_HIGH (1 << 3) 3281#define HSYNC_ACTIVE_HIGH (1 << 3)
3281#define PORT_DETECTED (1 << 2) 3282#define PORT_DETECTED (1 << 2)
3282 3283
3283#define HDMI_PIPE_ENABLED(V, P) \
3284 (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE))
3285
3286/* PCH SDVOB multiplex with HDMIB */ 3284/* PCH SDVOB multiplex with HDMIB */
3287#define PCH_SDVOB HDMIB 3285#define PCH_SDVOB HDMIB
3288 3286
@@ -3349,6 +3347,7 @@
3349#define PORT_TRANS_B_SEL_CPT (1<<29) 3347#define PORT_TRANS_B_SEL_CPT (1<<29)
3350#define PORT_TRANS_C_SEL_CPT (2<<29) 3348#define PORT_TRANS_C_SEL_CPT (2<<29)
3351#define PORT_TRANS_SEL_MASK (3<<29) 3349#define PORT_TRANS_SEL_MASK (3<<29)
3350#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
3352 3351
3353#define TRANS_DP_CTL_A 0xe0300 3352#define TRANS_DP_CTL_A 0xe0300
3354#define TRANS_DP_CTL_B 0xe1300 3353#define TRANS_DP_CTL_B 0xe1300
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 87677d60d0df..f10742359ec9 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -871,7 +871,8 @@ int i915_restore_state(struct drm_device *dev)
871 } 871 }
872 mutex_unlock(&dev->struct_mutex); 872 mutex_unlock(&dev->struct_mutex);
873 873
874 intel_init_clock_gating(dev); 874 if (drm_core_check_feature(dev, DRIVER_MODESET))
875 intel_init_clock_gating(dev);
875 876
876 if (IS_IRONLAKE_M(dev)) { 877 if (IS_IRONLAKE_M(dev)) {
877 ironlake_enable_drps(dev); 878 ironlake_enable_drps(dev);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 35364e68a091..04411ad2e779 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -878,7 +878,7 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
878 int pp_reg, lvds_reg; 878 int pp_reg, lvds_reg;
879 u32 val; 879 u32 val;
880 enum pipe panel_pipe = PIPE_A; 880 enum pipe panel_pipe = PIPE_A;
881 bool locked = locked; 881 bool locked = true;
882 882
883 if (HAS_PCH_SPLIT(dev_priv->dev)) { 883 if (HAS_PCH_SPLIT(dev_priv->dev)) {
884 pp_reg = PCH_PP_CONTROL; 884 pp_reg = PCH_PP_CONTROL;
@@ -980,8 +980,8 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
980 pipe_name(pipe)); 980 pipe_name(pipe));
981} 981}
982 982
983static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, 983static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
984 int reg, u32 port_sel, u32 val) 984 enum pipe pipe, u32 port_sel, u32 val)
985{ 985{
986 if ((val & DP_PORT_EN) == 0) 986 if ((val & DP_PORT_EN) == 0)
987 return false; 987 return false;
@@ -998,11 +998,58 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe,
998 return true; 998 return true;
999} 999}
1000 1000
1001static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1002 enum pipe pipe, u32 val)
1003{
1004 if ((val & PORT_ENABLE) == 0)
1005 return false;
1006
1007 if (HAS_PCH_CPT(dev_priv->dev)) {
1008 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1009 return false;
1010 } else {
1011 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1012 return false;
1013 }
1014 return true;
1015}
1016
1017static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1018 enum pipe pipe, u32 val)
1019{
1020 if ((val & LVDS_PORT_EN) == 0)
1021 return false;
1022
1023 if (HAS_PCH_CPT(dev_priv->dev)) {
1024 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1025 return false;
1026 } else {
1027 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1028 return false;
1029 }
1030 return true;
1031}
1032
1033static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1034 enum pipe pipe, u32 val)
1035{
1036 if ((val & ADPA_DAC_ENABLE) == 0)
1037 return false;
1038 if (HAS_PCH_CPT(dev_priv->dev)) {
1039 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1040 return false;
1041 } else {
1042 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1043 return false;
1044 }
1045 return true;
1046}
1047
1001static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1048static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1002 enum pipe pipe, int reg, u32 port_sel) 1049 enum pipe pipe, int reg, u32 port_sel)
1003{ 1050{
1004 u32 val = I915_READ(reg); 1051 u32 val = I915_READ(reg);
1005 WARN(dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val), 1052 WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1006 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1053 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1007 reg, pipe_name(pipe)); 1054 reg, pipe_name(pipe));
1008} 1055}
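
The HDMI/LVDS/ADPA checks move from the removed *_PIPE_ENABLED macros into helpers that also understand CPT, where the pipe routing is a different two-bit field. A condensed stand-alone model of the shared pattern (enable bit plus a platform-dependent pipe-select compare); the has_cpt flag and the sample register value are stand-ins for the real dev_priv plumbing.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PORT_ENABLE              (1u << 31)
#define TRANSCODER_MASK          (1u << 30)
#define TRANSCODER(pipe)         ((uint32_t)(pipe) << 30)
#define PORT_TRANS_SEL_MASK      (3u << 29)
#define PORT_TRANS_SEL_CPT(pipe) ((uint32_t)(pipe) << 29)

/*
 * Is this HDMI/SDVO port both enabled and routed to the given pipe?
 * On CPT the routing field differs, hence the has_cpt split.
 */
static bool hdmi_pipe_enabled(bool has_cpt, int pipe, uint32_t val)
{
        if ((val & PORT_ENABLE) == 0)
                return false;

        if (has_cpt)
                return (val & PORT_TRANS_SEL_MASK) == PORT_TRANS_SEL_CPT(pipe);
        else
                return (val & TRANSCODER_MASK) == TRANSCODER(pipe);
}

int main(void)
{
        /* enabled, routed to pipe B in the pre-CPT encoding */
        uint32_t val = PORT_ENABLE | TRANSCODER(1);

        printf("pre-CPT, pipe B: %d\n", hdmi_pipe_enabled(false, 1, val));
        printf("pre-CPT, pipe A: %d\n", hdmi_pipe_enabled(false, 0, val));
        printf("CPT,     pipe B: %d\n", hdmi_pipe_enabled(true, 1, val));
        return 0;
}
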
@@ -1011,7 +1058,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1011 enum pipe pipe, int reg) 1058 enum pipe pipe, int reg)
1012{ 1059{
1013 u32 val = I915_READ(reg); 1060 u32 val = I915_READ(reg);
1014 WARN(HDMI_PIPE_ENABLED(val, pipe), 1061 WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
1015 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1062 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1016 reg, pipe_name(pipe)); 1063 reg, pipe_name(pipe));
1017} 1064}
@@ -1028,13 +1075,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1028 1075
1029 reg = PCH_ADPA; 1076 reg = PCH_ADPA;
1030 val = I915_READ(reg); 1077 val = I915_READ(reg);
1031 WARN(ADPA_PIPE_ENABLED(val, pipe), 1078 WARN(adpa_pipe_enabled(dev_priv, val, pipe),
1032 "PCH VGA enabled on transcoder %c, should be disabled\n", 1079 "PCH VGA enabled on transcoder %c, should be disabled\n",
1033 pipe_name(pipe)); 1080 pipe_name(pipe));
1034 1081
1035 reg = PCH_LVDS; 1082 reg = PCH_LVDS;
1036 val = I915_READ(reg); 1083 val = I915_READ(reg);
1037 WARN(LVDS_PIPE_ENABLED(val, pipe), 1084 WARN(lvds_pipe_enabled(dev_priv, val, pipe),
1038 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1085 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1039 pipe_name(pipe)); 1086 pipe_name(pipe));
1040 1087
@@ -1360,7 +1407,7 @@ static void disable_pch_dp(struct drm_i915_private *dev_priv,
1360 enum pipe pipe, int reg, u32 port_sel) 1407 enum pipe pipe, int reg, u32 port_sel)
1361{ 1408{
1362 u32 val = I915_READ(reg); 1409 u32 val = I915_READ(reg);
1363 if (dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val)) { 1410 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1364 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); 1411 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1365 I915_WRITE(reg, val & ~DP_PORT_EN); 1412 I915_WRITE(reg, val & ~DP_PORT_EN);
1366 } 1413 }
@@ -1370,7 +1417,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1370 enum pipe pipe, int reg) 1417 enum pipe pipe, int reg)
1371{ 1418{
1372 u32 val = I915_READ(reg); 1419 u32 val = I915_READ(reg);
1373 if (HDMI_PIPE_ENABLED(val, pipe)) { 1420 if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1374 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", 1421 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1375 reg, pipe); 1422 reg, pipe);
1376 I915_WRITE(reg, val & ~PORT_ENABLE); 1423 I915_WRITE(reg, val & ~PORT_ENABLE);
@@ -1392,12 +1439,13 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1392 1439
1393 reg = PCH_ADPA; 1440 reg = PCH_ADPA;
1394 val = I915_READ(reg); 1441 val = I915_READ(reg);
1395 if (ADPA_PIPE_ENABLED(val, pipe)) 1442 if (adpa_pipe_enabled(dev_priv, val, pipe))
1396 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); 1443 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1397 1444
1398 reg = PCH_LVDS; 1445 reg = PCH_LVDS;
1399 val = I915_READ(reg); 1446 val = I915_READ(reg);
1400 if (LVDS_PIPE_ENABLED(val, pipe)) { 1447 if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1448 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1401 I915_WRITE(reg, val & ~LVDS_PORT_EN); 1449 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1402 POSTING_READ(reg); 1450 POSTING_READ(reg);
1403 udelay(100); 1451 udelay(100);
@@ -1751,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev)
1751 struct drm_framebuffer *fb; 1799 struct drm_framebuffer *fb;
1752 struct intel_framebuffer *intel_fb; 1800 struct intel_framebuffer *intel_fb;
1753 struct drm_i915_gem_object *obj; 1801 struct drm_i915_gem_object *obj;
1802 int enable_fbc;
1754 1803
1755 DRM_DEBUG_KMS("\n"); 1804 DRM_DEBUG_KMS("\n");
1756 1805
@@ -1791,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev)
1791 intel_fb = to_intel_framebuffer(fb); 1840 intel_fb = to_intel_framebuffer(fb);
1792 obj = intel_fb->obj; 1841 obj = intel_fb->obj;
1793 1842
1794 if (!i915_enable_fbc) { 1843 enable_fbc = i915_enable_fbc;
1795 DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); 1844 if (enable_fbc < 0) {
1845 DRM_DEBUG_KMS("fbc set to per-chip default\n");
1846 enable_fbc = 1;
1847 if (INTEL_INFO(dev)->gen <= 5)
1848 enable_fbc = 0;
1849 }
1850 if (!enable_fbc) {
1851 DRM_DEBUG_KMS("fbc disabled per module param\n");
1796 dev_priv->no_fbc_reason = FBC_MODULE_PARAM; 1852 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1797 goto out_disable; 1853 goto out_disable;
1798 } 1854 }
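
i915_enable_fbc becomes a tri-state here: a negative value means "per-chip default" (on only above gen 5), 0 forces FBC off, positive forces it on. A small sketch of resolving such a tri-state module parameter; the parameter name and gen cutoff mirror the hunk above, the rest is illustrative.

#include <stdio.h>

/* -1: per-chip default, 0: force off, 1: force on (mirrors i915_enable_fbc) */
static int enable_fbc_param = -1;

static int resolve_enable_fbc(int gen)
{
        int enable_fbc = enable_fbc_param;

        if (enable_fbc < 0) {
                /* per-chip default: FBC only on gen6+ hardware */
                enable_fbc = (gen > 5);
        }
        return enable_fbc;
}

int main(void)
{
        for (int gen = 4; gen <= 7; gen++)
                printf("gen%d: fbc %s\n", gen,
                       resolve_enable_fbc(gen) ? "enabled" : "disabled");
        return 0;
}
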
@@ -4639,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4639 bpc = 6; /* min is 18bpp */ 4695 bpc = 6; /* min is 18bpp */
4640 break; 4696 break;
4641 case 24: 4697 case 24:
4642 bpc = min((unsigned int)8, display_bpc); 4698 bpc = 8;
4643 break; 4699 break;
4644 case 30: 4700 case 30:
4645 bpc = min((unsigned int)10, display_bpc); 4701 bpc = 10;
4646 break; 4702 break;
4647 case 48: 4703 case 48:
4648 bpc = min((unsigned int)12, display_bpc); 4704 bpc = 12;
4649 break; 4705 break;
4650 default: 4706 default:
4651 DRM_DEBUG("unsupported depth, assuming 24 bits\n"); 4707 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
@@ -4653,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4653 break; 4709 break;
4654 } 4710 }
4655 4711
4712 display_bpc = min(display_bpc, bpc);
4713
4656 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", 4714 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
4657 bpc, display_bpc); 4715 bpc, display_bpc);
4658 4716
4659 *pipe_bpp = bpc * 3; 4717 *pipe_bpp = display_bpc * 3;
4660 4718
4661 return display_bpc != bpc; 4719 return display_bpc != bpc;
4662} 4720}
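
Instead of clamping each per-depth case against display_bpc, the pipe bpc is now derived from the framebuffer depth alone and the clamp happens once at the end, with the return value reporting whether dithering is needed (framebuffer deeper than the display can accept). A self-contained sketch of that final step; fb_depth and display_bpc are plain parameters here rather than the crtc/connector state the driver walks.

#include <stdbool.h>
#include <stdio.h>

/* Pick the pipe bpp from fb depth and the sink's max bpc; return true if
 * the pipe must dither (fb carries more bits than the display accepts). */
static bool choose_pipe_bpp_dither(int fb_depth, unsigned int display_bpc,
                                   int *pipe_bpp)
{
        unsigned int bpc;

        switch (fb_depth) {
        case 18: bpc = 6;  break;
        case 24: bpc = 8;  break;
        case 30: bpc = 10; break;
        case 48: bpc = 12; break;
        default: bpc = 8;  break;       /* unsupported depth: assume 24bpp */
        }

        if (display_bpc > bpc)
                display_bpc = bpc;      /* never exceed what the fb provides */

        *pipe_bpp = (int)display_bpc * 3;
        return display_bpc != bpc;      /* dither when the sink is shallower */
}

int main(void)
{
        int bpp;
        bool dither = choose_pipe_bpp_dither(24, 6, &bpp);

        printf("pipe_bpp=%d dither=%d\n", bpp, dither);   /* 18, 1 */
        return 0;
}
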
@@ -5049,6 +5107,81 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5049 return ret; 5107 return ret;
5050} 5108}
5051 5109
5110static void ironlake_update_pch_refclk(struct drm_device *dev)
5111{
5112 struct drm_i915_private *dev_priv = dev->dev_private;
5113 struct drm_mode_config *mode_config = &dev->mode_config;
5114 struct drm_crtc *crtc;
5115 struct intel_encoder *encoder;
5116 struct intel_encoder *has_edp_encoder = NULL;
5117 u32 temp;
5118 bool has_lvds = false;
5119
5120 /* We need to take the global config into account */
5121 list_for_each_entry(crtc, &mode_config->crtc_list, head) {
5122 if (!crtc->enabled)
5123 continue;
5124
5125 list_for_each_entry(encoder, &mode_config->encoder_list,
5126 base.head) {
5127 if (encoder->base.crtc != crtc)
5128 continue;
5129
5130 switch (encoder->type) {
5131 case INTEL_OUTPUT_LVDS:
5132 has_lvds = true;
5133 case INTEL_OUTPUT_EDP:
5134 has_edp_encoder = encoder;
5135 break;
5136 }
5137 }
5138 }
5139
5140 /* Ironlake: try to setup display ref clock before DPLL
5141 * enabling. This is only under driver's control after
5142 * PCH B stepping, previous chipset stepping should be
5143 * ignoring this setting.
5144 */
5145 temp = I915_READ(PCH_DREF_CONTROL);
5146 /* Always enable nonspread source */
5147 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5148 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5149 temp &= ~DREF_SSC_SOURCE_MASK;
5150 temp |= DREF_SSC_SOURCE_ENABLE;
5151 I915_WRITE(PCH_DREF_CONTROL, temp);
5152
5153 POSTING_READ(PCH_DREF_CONTROL);
5154 udelay(200);
5155
5156 if (has_edp_encoder) {
5157 if (intel_panel_use_ssc(dev_priv)) {
5158 temp |= DREF_SSC1_ENABLE;
5159 I915_WRITE(PCH_DREF_CONTROL, temp);
5160
5161 POSTING_READ(PCH_DREF_CONTROL);
5162 udelay(200);
5163 }
5164 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5165
5166 /* Enable CPU source on CPU attached eDP */
5167 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5168 if (intel_panel_use_ssc(dev_priv))
5169 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5170 else
5171 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5172 } else {
5173 /* Enable SSC on PCH eDP if needed */
5174 if (intel_panel_use_ssc(dev_priv)) {
5175 DRM_ERROR("enabling SSC on PCH\n");
5176 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
5177 }
5178 }
5179 I915_WRITE(PCH_DREF_CONTROL, temp);
5180 POSTING_READ(PCH_DREF_CONTROL);
5181 udelay(200);
5182 }
5183}
5184
5052static int ironlake_crtc_mode_set(struct drm_crtc *crtc, 5185static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5053 struct drm_display_mode *mode, 5186 struct drm_display_mode *mode,
5054 struct drm_display_mode *adjusted_mode, 5187 struct drm_display_mode *adjusted_mode,
@@ -5244,49 +5377,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5244 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, 5377 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5245 &m_n); 5378 &m_n);
5246 5379
5247 /* Ironlake: try to setup display ref clock before DPLL 5380 ironlake_update_pch_refclk(dev);
5248 * enabling. This is only under driver's control after
5249 * PCH B stepping, previous chipset stepping should be
5250 * ignoring this setting.
5251 */
5252 temp = I915_READ(PCH_DREF_CONTROL);
5253 /* Always enable nonspread source */
5254 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5255 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5256 temp &= ~DREF_SSC_SOURCE_MASK;
5257 temp |= DREF_SSC_SOURCE_ENABLE;
5258 I915_WRITE(PCH_DREF_CONTROL, temp);
5259
5260 POSTING_READ(PCH_DREF_CONTROL);
5261 udelay(200);
5262
5263 if (has_edp_encoder) {
5264 if (intel_panel_use_ssc(dev_priv)) {
5265 temp |= DREF_SSC1_ENABLE;
5266 I915_WRITE(PCH_DREF_CONTROL, temp);
5267
5268 POSTING_READ(PCH_DREF_CONTROL);
5269 udelay(200);
5270 }
5271 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5272
5273 /* Enable CPU source on CPU attached eDP */
5274 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5275 if (intel_panel_use_ssc(dev_priv))
5276 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5277 else
5278 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5279 } else {
5280 /* Enable SSC on PCH eDP if needed */
5281 if (intel_panel_use_ssc(dev_priv)) {
5282 DRM_ERROR("enabling SSC on PCH\n");
5283 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
5284 }
5285 }
5286 I915_WRITE(PCH_DREF_CONTROL, temp);
5287 POSTING_READ(PCH_DREF_CONTROL);
5288 udelay(200);
5289 }
5290 5381
5291 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 5382 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5292 if (has_reduced_clock) 5383 if (has_reduced_clock)
@@ -7157,8 +7248,6 @@ static void intel_setup_outputs(struct drm_device *dev)
7157 intel_encoder_clones(dev, encoder->clone_mask); 7248 intel_encoder_clones(dev, encoder->clone_mask);
7158 } 7249 }
7159 7250
7160 intel_panel_setup_backlight(dev);
7161
7162 /* disable all the possible outputs/crtcs before entering KMS mode */ 7251 /* disable all the possible outputs/crtcs before entering KMS mode */
7163 drm_helper_disable_unused_functions(dev); 7252 drm_helper_disable_unused_functions(dev);
7164} 7253}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0feae908bb37..44fef5e1c490 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1841,6 +1841,11 @@ done:
1841static void 1841static void
1842intel_dp_destroy (struct drm_connector *connector) 1842intel_dp_destroy (struct drm_connector *connector)
1843{ 1843{
1844 struct drm_device *dev = connector->dev;
1845
1846 if (intel_dpd_is_edp(dev))
1847 intel_panel_destroy_backlight(dev);
1848
1844 drm_sysfs_connector_remove(connector); 1849 drm_sysfs_connector_remove(connector);
1845 drm_connector_cleanup(connector); 1850 drm_connector_cleanup(connector);
1846 kfree(connector); 1851 kfree(connector);
@@ -2072,6 +2077,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2072 DRM_MODE_TYPE_PREFERRED; 2077 DRM_MODE_TYPE_PREFERRED;
2073 } 2078 }
2074 } 2079 }
2080 dev_priv->int_edp_connector = connector;
2081 intel_panel_setup_backlight(dev);
2075 } 2082 }
2076 2083
2077 intel_dp_add_properties(intel_dp, connector); 2084 intel_dp_add_properties(intel_dp, connector);
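
Backlight setup moves out of intel_setup_outputs() and into the connectors that actually own a panel: LVDS always, DP only when the port is wired as eDP; the matching intel_panel_destroy_backlight() call sits in each connector's destroy path. A schematic sketch of that ownership pairing; the connector struct and setup/teardown bodies are placeholders, not the driver's.

#include <stdbool.h>
#include <stdio.h>

struct connector {
        const char *name;
        bool has_panel;          /* LVDS, or a DP port wired as eDP */
        bool backlight_up;
};

/* Register a backlight only for connectors that drive an internal panel. */
static void connector_init(struct connector *c)
{
        if (c->has_panel) {
                c->backlight_up = true;      /* intel_panel_setup_backlight() */
                printf("%s: backlight registered\n", c->name);
        }
}

/* Tear the backlight down from the same place that created it. */
static void connector_destroy(struct connector *c)
{
        if (c->backlight_up) {
                c->backlight_up = false;     /* intel_panel_destroy_backlight() */
                printf("%s: backlight unregistered\n", c->name);
        }
}

int main(void)
{
        struct connector lvds = { "LVDS", true,  false };
        struct connector hdmi = { "HDMI", false, false };

        connector_init(&lvds);
        connector_init(&hdmi);
        connector_destroy(&hdmi);
        connector_destroy(&lvds);
        return 0;
}
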
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7b330e76a435..fe1099d8817e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -297,9 +297,10 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
297extern u32 intel_panel_get_max_backlight(struct drm_device *dev); 297extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
298extern u32 intel_panel_get_backlight(struct drm_device *dev); 298extern u32 intel_panel_get_backlight(struct drm_device *dev);
299extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); 299extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
300extern void intel_panel_setup_backlight(struct drm_device *dev); 300extern int intel_panel_setup_backlight(struct drm_device *dev);
301extern void intel_panel_enable_backlight(struct drm_device *dev); 301extern void intel_panel_enable_backlight(struct drm_device *dev);
302extern void intel_panel_disable_backlight(struct drm_device *dev); 302extern void intel_panel_disable_backlight(struct drm_device *dev);
303extern void intel_panel_destroy_backlight(struct drm_device *dev);
303extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); 304extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
304 305
305extern void intel_crtc_load_lut(struct drm_crtc *crtc); 306extern void intel_crtc_load_lut(struct drm_crtc *crtc);
@@ -336,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
336 struct drm_connector *connector, 337 struct drm_connector *connector,
337 struct intel_load_detect_pipe *old); 338 struct intel_load_detect_pipe *old);
338 339
339extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
340extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
341extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
342extern void intelfb_restore(void); 340extern void intelfb_restore(void);
343extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 341extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
344 u16 blue, int regno); 342 u16 blue, int regno);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 2e8ddfcba40c..31da77f5c051 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -72,14 +72,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
72{ 72{
73 struct drm_device *dev = intel_lvds->base.base.dev; 73 struct drm_device *dev = intel_lvds->base.base.dev;
74 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
75 u32 ctl_reg, lvds_reg; 75 u32 ctl_reg, lvds_reg, stat_reg;
76 76
77 if (HAS_PCH_SPLIT(dev)) { 77 if (HAS_PCH_SPLIT(dev)) {
78 ctl_reg = PCH_PP_CONTROL; 78 ctl_reg = PCH_PP_CONTROL;
79 lvds_reg = PCH_LVDS; 79 lvds_reg = PCH_LVDS;
80 stat_reg = PCH_PP_STATUS;
80 } else { 81 } else {
81 ctl_reg = PP_CONTROL; 82 ctl_reg = PP_CONTROL;
82 lvds_reg = LVDS; 83 lvds_reg = LVDS;
84 stat_reg = PP_STATUS;
83 } 85 }
84 86
85 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); 87 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
@@ -94,17 +96,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
94 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", 96 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
95 intel_lvds->pfit_control, 97 intel_lvds->pfit_control,
96 intel_lvds->pfit_pgm_ratios); 98 intel_lvds->pfit_pgm_ratios);
97 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) { 99
98 DRM_ERROR("timed out waiting for panel to power off\n"); 100 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
99 } else { 101 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
100 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); 102 intel_lvds->pfit_dirty = false;
101 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
102 intel_lvds->pfit_dirty = false;
103 }
104 } 103 }
105 104
106 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); 105 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
107 POSTING_READ(lvds_reg); 106 POSTING_READ(lvds_reg);
107 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
108 DRM_ERROR("timed out waiting for panel to power on\n");
108 109
109 intel_panel_enable_backlight(dev); 110 intel_panel_enable_backlight(dev);
110} 111}
@@ -113,24 +114,25 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
113{ 114{
114 struct drm_device *dev = intel_lvds->base.base.dev; 115 struct drm_device *dev = intel_lvds->base.base.dev;
115 struct drm_i915_private *dev_priv = dev->dev_private; 116 struct drm_i915_private *dev_priv = dev->dev_private;
116 u32 ctl_reg, lvds_reg; 117 u32 ctl_reg, lvds_reg, stat_reg;
117 118
118 if (HAS_PCH_SPLIT(dev)) { 119 if (HAS_PCH_SPLIT(dev)) {
119 ctl_reg = PCH_PP_CONTROL; 120 ctl_reg = PCH_PP_CONTROL;
120 lvds_reg = PCH_LVDS; 121 lvds_reg = PCH_LVDS;
122 stat_reg = PCH_PP_STATUS;
121 } else { 123 } else {
122 ctl_reg = PP_CONTROL; 124 ctl_reg = PP_CONTROL;
123 lvds_reg = LVDS; 125 lvds_reg = LVDS;
126 stat_reg = PP_STATUS;
124 } 127 }
125 128
126 intel_panel_disable_backlight(dev); 129 intel_panel_disable_backlight(dev);
127 130
128 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); 131 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
132 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
133 DRM_ERROR("timed out waiting for panel to power off\n");
129 134
130 if (intel_lvds->pfit_control) { 135 if (intel_lvds->pfit_control) {
131 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
132 DRM_ERROR("timed out waiting for panel to power off\n");
133
134 I915_WRITE(PFIT_CONTROL, 0); 136 I915_WRITE(PFIT_CONTROL, 0);
135 intel_lvds->pfit_dirty = true; 137 intel_lvds->pfit_dirty = true;
136 } 138 }
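
Both the enable and disable paths now wait for the panel power sequencer on the status register that matches the control register in use (PCH_PP_STATUS on PCH platforms, PP_STATUS otherwise), rather than always polling PP_STATUS. A stand-alone model of that bounded poll; pp_status_read() and the 1 ms tick stand in for the register read and the driver's wait_for() helper.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PP_ON (1u << 31)

/* Stand-in for reading PCH_PP_STATUS / PP_STATUS; flips on after a while. */
static uint32_t pp_status_read(int tick)
{
        return tick >= 5 ? PP_ON : 0;
}

/* Poll until (status & PP_ON) matches 'want_on', or time out. */
static bool wait_panel_power(bool want_on, int timeout_ms)
{
        for (int ms = 0; ms < timeout_ms; ms++) {
                bool on = (pp_status_read(ms) & PP_ON) != 0;

                if (on == want_on)
                        return true;
                /* real code sleeps ~1 ms between polls */
        }
        return false;
}

int main(void)
{
        if (!wait_panel_power(true, 1000))
                fprintf(stderr, "timed out waiting for panel to power on\n");
        else
                printf("panel is on\n");
        return 0;
}
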
@@ -398,53 +400,21 @@ out:
398 400
399static void intel_lvds_prepare(struct drm_encoder *encoder) 401static void intel_lvds_prepare(struct drm_encoder *encoder)
400{ 402{
401 struct drm_device *dev = encoder->dev;
402 struct drm_i915_private *dev_priv = dev->dev_private;
403 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 403 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
404 404
405 /* We try to do the minimum that is necessary in order to unlock 405 /*
406 * the registers for mode setting.
407 *
408 * On Ironlake, this is quite simple as we just set the unlock key
409 * and ignore all subtleties. (This may cause some issues...)
410 *
411 * Prior to Ironlake, we must disable the pipe if we want to adjust 406 * Prior to Ironlake, we must disable the pipe if we want to adjust
412 * the panel fitter. However at all other times we can just reset 407 * the panel fitter. However at all other times we can just reset
413 * the registers regardless. 408 * the registers regardless.
414 */ 409 */
415 410 if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
416 if (HAS_PCH_SPLIT(dev)) { 411 intel_lvds_disable(intel_lvds);
417 I915_WRITE(PCH_PP_CONTROL,
418 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
419 } else if (intel_lvds->pfit_dirty) {
420 I915_WRITE(PP_CONTROL,
421 (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
422 & ~POWER_TARGET_ON);
423 } else {
424 I915_WRITE(PP_CONTROL,
425 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
426 }
427} 412}
428 413
429static void intel_lvds_commit(struct drm_encoder *encoder) 414static void intel_lvds_commit(struct drm_encoder *encoder)
430{ 415{
431 struct drm_device *dev = encoder->dev;
432 struct drm_i915_private *dev_priv = dev->dev_private;
433 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 416 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
434 417
435 /* Undo any unlocking done in prepare to prevent accidental
436 * adjustment of the registers.
437 */
438 if (HAS_PCH_SPLIT(dev)) {
439 u32 val = I915_READ(PCH_PP_CONTROL);
440 if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
441 I915_WRITE(PCH_PP_CONTROL, val & 0x3);
442 } else {
443 u32 val = I915_READ(PP_CONTROL);
444 if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
445 I915_WRITE(PP_CONTROL, val & 0x3);
446 }
447
448 /* Always do a full power on as we do not know what state 418 /* Always do a full power on as we do not know what state
449 * we were left in. 419 * we were left in.
450 */ 420 */
@@ -582,6 +552,8 @@ static void intel_lvds_destroy(struct drm_connector *connector)
582 struct drm_device *dev = connector->dev; 552 struct drm_device *dev = connector->dev;
583 struct drm_i915_private *dev_priv = dev->dev_private; 553 struct drm_i915_private *dev_priv = dev->dev_private;
584 554
555 intel_panel_destroy_backlight(dev);
556
585 if (dev_priv->lid_notifier.notifier_call) 557 if (dev_priv->lid_notifier.notifier_call)
586 acpi_lid_notifier_unregister(&dev_priv->lid_notifier); 558 acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
587 drm_sysfs_connector_remove(connector); 559 drm_sysfs_connector_remove(connector);
@@ -1040,6 +1012,19 @@ out:
1040 pwm = I915_READ(BLC_PWM_PCH_CTL1); 1012 pwm = I915_READ(BLC_PWM_PCH_CTL1);
1041 pwm |= PWM_PCH_ENABLE; 1013 pwm |= PWM_PCH_ENABLE;
1042 I915_WRITE(BLC_PWM_PCH_CTL1, pwm); 1014 I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
1015 /*
1016 * Unlock registers and just
1017 * leave them unlocked
1018 */
1019 I915_WRITE(PCH_PP_CONTROL,
1020 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
1021 } else {
1022 /*
1023 * Unlock registers and just
1024 * leave them unlocked
1025 */
1026 I915_WRITE(PP_CONTROL,
1027 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
1043 } 1028 }
1044 dev_priv->lid_notifier.notifier_call = intel_lid_notify; 1029 dev_priv->lid_notifier.notifier_call = intel_lid_notify;
1045 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { 1030 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
@@ -1049,6 +1034,9 @@ out:
1049 /* keep the LVDS connector */ 1034 /* keep the LVDS connector */
1050 dev_priv->int_lvds_connector = connector; 1035 dev_priv->int_lvds_connector = connector;
1051 drm_sysfs_connector_add(connector); 1036 drm_sysfs_connector_add(connector);
1037
1038 intel_panel_setup_backlight(dev);
1039
1052 return true; 1040 return true;
1053 1041
1054failed: 1042failed:
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index b7c5ddb564d1..b8e8158bb16e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -227,7 +227,6 @@ void intel_opregion_asle_intr(struct drm_device *dev)
227 asle->aslc = asle_stat; 227 asle->aslc = asle_stat;
228} 228}
229 229
230/* Only present on Ironlake+ */
231void intel_opregion_gse_intr(struct drm_device *dev) 230void intel_opregion_gse_intr(struct drm_device *dev)
232{ 231{
233 struct drm_i915_private *dev_priv = dev->dev_private; 232 struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 05f500cd9c24..a9e0c7bcd317 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -277,7 +277,7 @@ void intel_panel_enable_backlight(struct drm_device *dev)
277 dev_priv->backlight_enabled = true; 277 dev_priv->backlight_enabled = true;
278} 278}
279 279
280void intel_panel_setup_backlight(struct drm_device *dev) 280static void intel_panel_init_backlight(struct drm_device *dev)
281{ 281{
282 struct drm_i915_private *dev_priv = dev->dev_private; 282 struct drm_i915_private *dev_priv = dev->dev_private;
283 283
@@ -309,3 +309,73 @@ intel_panel_detect(struct drm_device *dev)
309 309
310 return connector_status_unknown; 310 return connector_status_unknown;
311} 311}
312
313#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
314static int intel_panel_update_status(struct backlight_device *bd)
315{
316 struct drm_device *dev = bl_get_data(bd);
317 intel_panel_set_backlight(dev, bd->props.brightness);
318 return 0;
319}
320
321static int intel_panel_get_brightness(struct backlight_device *bd)
322{
323 struct drm_device *dev = bl_get_data(bd);
324 return intel_panel_get_backlight(dev);
325}
326
327static const struct backlight_ops intel_panel_bl_ops = {
328 .update_status = intel_panel_update_status,
329 .get_brightness = intel_panel_get_brightness,
330};
331
332int intel_panel_setup_backlight(struct drm_device *dev)
333{
334 struct drm_i915_private *dev_priv = dev->dev_private;
335 struct backlight_properties props;
336 struct drm_connector *connector;
337
338 intel_panel_init_backlight(dev);
339
340 if (dev_priv->int_lvds_connector)
341 connector = dev_priv->int_lvds_connector;
342 else if (dev_priv->int_edp_connector)
343 connector = dev_priv->int_edp_connector;
344 else
345 return -ENODEV;
346
347 props.type = BACKLIGHT_RAW;
348 props.max_brightness = intel_panel_get_max_backlight(dev);
349 dev_priv->backlight =
350 backlight_device_register("intel_backlight",
351 &connector->kdev, dev,
352 &intel_panel_bl_ops, &props);
353
354 if (IS_ERR(dev_priv->backlight)) {
355 DRM_ERROR("Failed to register backlight: %ld\n",
356 PTR_ERR(dev_priv->backlight));
357 dev_priv->backlight = NULL;
358 return -ENODEV;
359 }
360 dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev);
361 return 0;
362}
363
364void intel_panel_destroy_backlight(struct drm_device *dev)
365{
366 struct drm_i915_private *dev_priv = dev->dev_private;
367 if (dev_priv->backlight)
368 backlight_device_unregister(dev_priv->backlight);
369}
370#else
371int intel_panel_setup_backlight(struct drm_device *dev)
372{
373 intel_panel_init_backlight(dev);
374 return 0;
375}
376
377void intel_panel_destroy_backlight(struct drm_device *dev)
378{
379 return;
380}
381#endif
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 47b9b2777038..c30626ea9f93 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -290,6 +290,10 @@ static int init_render_ring(struct intel_ring_buffer *ring)
290 if (IS_GEN6(dev) || IS_GEN7(dev)) 290 if (IS_GEN6(dev) || IS_GEN7(dev))
291 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; 291 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
292 I915_WRITE(MI_MODE, mode); 292 I915_WRITE(MI_MODE, mode);
293 if (IS_GEN7(dev))
294 I915_WRITE(GFX_MODE_GEN7,
295 GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
296 GFX_MODE_ENABLE(GFX_REPLAY_MODE));
293 } 297 }
294 298
295 if (INTEL_INFO(dev)->gen >= 6) { 299 if (INTEL_INFO(dev)->gen >= 6) {
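
GFX_MODE_GEN7 is one of the "masked" registers where the upper 16 bits select which of the lower 16 bits a write actually changes, so GFX_MODE_ENABLE()/GFX_MODE_DISABLE() pair each flag with its write-enable bit. The macro layout below is my reading of that convention (flag duplicated into bits 31:16 as the mask), not copied from this diff.

#include <stdint.h>
#include <stdio.h>

/* Masked register write: bits 31:16 say which of bits 15:0 to update. */
#define MASKED_ENABLE(flag)   (((uint32_t)(flag) << 16) | (flag))
#define MASKED_DISABLE(flag)  ((uint32_t)(flag) << 16)

/* Only bits whose mask half is set get copied from the written value. */
static uint16_t masked_write(uint16_t current, uint32_t wr)
{
        uint16_t mask = wr >> 16;
        uint16_t val  = wr & 0xffff;

        return (current & ~mask) | (val & mask);
}

int main(void)
{
        uint16_t reg = 0x0005;                     /* some bits already set */

        reg = masked_write(reg, MASKED_ENABLE(1u << 3));   /* set bit 3 */
        reg = masked_write(reg, MASKED_DISABLE(1u << 0));  /* clear bit 0 */
        printf("reg = 0x%04x\n", reg);             /* 0x000c */
        return 0;
}
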
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 30fe554d8936..6348c499616f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -92,6 +92,11 @@ struct intel_sdvo {
92 */ 92 */
93 uint16_t attached_output; 93 uint16_t attached_output;
94 94
95 /*
96 * Hotplug activation bits for this device
97 */
98 uint8_t hotplug_active[2];
99
95 /** 100 /**
96 * This is used to select the color range of RBG outputs in HDMI mode. 101 * This is used to select the color range of RBG outputs in HDMI mode.
97 * It is only valid when using TMDS encoding and 8 bit per color mode. 102 * It is only valid when using TMDS encoding and 8 bit per color mode.
@@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
1208 return true; 1213 return true;
1209} 1214}
1210 1215
1211/* No use! */ 1216static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
1212#if 0
1213struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
1214{
1215 struct drm_connector *connector = NULL;
1216 struct intel_sdvo *iout = NULL;
1217 struct intel_sdvo *sdvo;
1218
1219 /* find the sdvo connector */
1220 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1221 iout = to_intel_sdvo(connector);
1222
1223 if (iout->type != INTEL_OUTPUT_SDVO)
1224 continue;
1225
1226 sdvo = iout->dev_priv;
1227
1228 if (sdvo->sdvo_reg == SDVOB && sdvoB)
1229 return connector;
1230
1231 if (sdvo->sdvo_reg == SDVOC && !sdvoB)
1232 return connector;
1233
1234 }
1235
1236 return NULL;
1237}
1238
1239int intel_sdvo_supports_hotplug(struct drm_connector *connector)
1240{ 1217{
1241 u8 response[2]; 1218 u8 response[2];
1242 u8 status;
1243 struct intel_sdvo *intel_sdvo;
1244 DRM_DEBUG_KMS("\n");
1245
1246 if (!connector)
1247 return 0;
1248
1249 intel_sdvo = to_intel_sdvo(connector);
1250 1219
1251 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, 1220 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
1252 &response, 2) && response[0]; 1221 &response, 2) && response[0];
1253} 1222}
1254 1223
1255void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) 1224static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
1256{ 1225{
1257 u8 response[2]; 1226 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1258 u8 status;
1259 struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
1260
1261 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
1262 intel_sdvo_read_response(intel_sdvo, &response, 2);
1263
1264 if (on) {
1265 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
1266 status = intel_sdvo_read_response(intel_sdvo, &response, 2);
1267
1268 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1269 } else {
1270 response[0] = 0;
1271 response[1] = 0;
1272 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1273 }
1274 1227
1275 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); 1228 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
1276 intel_sdvo_read_response(intel_sdvo, &response, 2);
1277} 1229}
1278#endif
1279 1230
1280static bool 1231static bool
1281intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) 1232intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
@@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2045{ 1996{
2046 struct drm_encoder *encoder = &intel_sdvo->base.base; 1997 struct drm_encoder *encoder = &intel_sdvo->base.base;
2047 struct drm_connector *connector; 1998 struct drm_connector *connector;
1999 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2048 struct intel_connector *intel_connector; 2000 struct intel_connector *intel_connector;
2049 struct intel_sdvo_connector *intel_sdvo_connector; 2001 struct intel_sdvo_connector *intel_sdvo_connector;
2050 2002
@@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2062 2014
2063 intel_connector = &intel_sdvo_connector->base; 2015 intel_connector = &intel_sdvo_connector->base;
2064 connector = &intel_connector->base; 2016 connector = &intel_connector->base;
2065 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; 2017 if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
2018 connector->polled = DRM_CONNECTOR_POLL_HPD;
2019 intel_sdvo->hotplug_active[0] |= 1 << device;
2020 /* Some SDVO devices have one-shot hotplug interrupts.
2021 * Ensure that they get re-enabled when an interrupt happens.
2022 */
2023 intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
2024 intel_sdvo_enable_hotplug(intel_encoder);
2025 }
2026 else
2027 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
2066 encoder->encoder_type = DRM_MODE_ENCODER_TMDS; 2028 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
2067 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2029 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
2068 2030
@@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2569 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) 2531 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
2570 goto err; 2532 goto err;
2571 2533
2534 /* Set up hotplug command - note paranoia about contents of reply.
2535 * We assume that the hardware is in a sane state, and only touch
2536 * the bits we think we understand.
2537 */
2538 intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
2539 &intel_sdvo->hotplug_active, 2);
2540 intel_sdvo->hotplug_active[0] &= ~0x3;
2541
2572 if (intel_sdvo_output_setup(intel_sdvo, 2542 if (intel_sdvo_output_setup(intel_sdvo,
2573 intel_sdvo->caps.output_flags) != true) { 2543 intel_sdvo->caps.output_flags) != true) {
2574 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", 2544 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
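
The SDVO hotplug rework keeps a per-device hotplug_active mask: init reads the current ACTIVE_HOT_PLUG state and clears the low bits, each DVI output that supports hotplug sets bit (1 << device), and intel_sdvo_enable_hotplug() rewrites the whole mask so one-shot hotplug interrupts get re-armed from the hotplug handler. A compact model of that bookkeeping; the get/set helpers are placeholders for the SDVO command calls.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for SDVO_CMD_GET/SET_ACTIVE_HOT_PLUG over the SDVO channel. */
static uint16_t sdvo_hw_hotplug;

static uint16_t sdvo_get_active_hotplug(void)   { return sdvo_hw_hotplug; }
static void sdvo_set_active_hotplug(uint16_t m) { sdvo_hw_hotplug = m; }

struct sdvo {
        uint16_t hotplug_active;   /* outputs we want hotplug-armed */
};

static void sdvo_init(struct sdvo *s)
{
        /* Start from the hardware state, but only keep bits we understand. */
        s->hotplug_active = sdvo_get_active_hotplug();
        s->hotplug_active &= ~0x3;
}

static void sdvo_dvi_output_init(struct sdvo *s, int device)
{
        s->hotplug_active |= (uint16_t)(1 << device);
        sdvo_set_active_hotplug(s->hotplug_active);   /* arm it now */
}

/* Called again from the hotplug handler: re-arm one-shot interrupts. */
static void sdvo_enable_hotplug(struct sdvo *s)
{
        sdvo_set_active_hotplug(s->hotplug_active);
}

int main(void)
{
        struct sdvo s;

        sdvo_hw_hotplug = 0x3;        /* whatever the BIOS left armed */
        sdvo_init(&s);
        sdvo_dvi_output_init(&s, 0);  /* DVI output 0 supports hotplug */
        sdvo_enable_hotplug(&s);
        printf("active hotplug mask: 0x%x\n", sdvo_get_active_hotplug());
        return 0;
}
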
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 8d02d875376d..c919cfc8f2fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -530,7 +530,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
530 nouveau_gpuobj_ref(NULL, &obj); 530 nouveau_gpuobj_ref(NULL, &obj);
531 if (ret) 531 if (ret)
532 return ret; 532 return ret;
533 } else { 533 } else
534 if (USE_SEMA(dev)) {
534 /* map fence bo into channel's vm */ 535 /* map fence bo into channel's vm */
535 ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm, 536 ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
536 &chan->fence.vma); 537 &chan->fence.vma);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index c444cadbf849..2706cb3d871a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
37 return -ENOMEM; 37 return -ENOMEM;
38 38
39 nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL); 39 nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
40 if (!nvbe->ttm_alloced) 40 if (!nvbe->ttm_alloced) {
41 kfree(nvbe->pages);
42 nvbe->pages = NULL;
41 return -ENOMEM; 43 return -ENOMEM;
44 }
42 45
43 nvbe->nr_pages = 0; 46 nvbe->nr_pages = 0;
44 while (num_pages--) { 47 while (num_pages--) {
@@ -126,7 +129,7 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
126 129
127 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { 130 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
128 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); 131 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
129 dma_offset += NV_CTXDMA_PAGE_SIZE; 132 offset_l += NV_CTXDMA_PAGE_SIZE;
130 } 133 }
131 } 134 }
132 135
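
The populate() fix frees the already-allocated pages array (and NULLs it) when the second allocation fails, instead of leaking it. The usual shape of that rollback, reduced to plain malloc/free so it compiles on its own.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct backend {
        void **pages;
        bool  *ttm_alloced;
};

static int backend_populate(struct backend *be, unsigned long num_pages)
{
        be->pages = calloc(num_pages, sizeof(*be->pages));
        if (!be->pages)
                return -1;

        be->ttm_alloced = calloc(num_pages, sizeof(*be->ttm_alloced));
        if (!be->ttm_alloced) {
                /* roll back the first allocation instead of leaking it */
                free(be->pages);
                be->pages = NULL;
                return -1;
        }
        return 0;
}

int main(void)
{
        struct backend be = { NULL, NULL };

        if (backend_populate(&be, 16) == 0)
                printf("populated\n");
        free(be.ttm_alloced);
        free(be.pages);
        return 0;
}
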
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 118261d4927a..5e45398a9e2d 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -781,11 +781,20 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
781 struct drm_device *dev = crtc->dev; 781 struct drm_device *dev = crtc->dev;
782 struct drm_nouveau_private *dev_priv = dev->dev_private; 782 struct drm_nouveau_private *dev_priv = dev->dev_private;
783 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 783 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
784 struct drm_framebuffer *drm_fb = nv_crtc->base.fb; 784 struct drm_framebuffer *drm_fb;
785 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 785 struct nouveau_framebuffer *fb;
786 int arb_burst, arb_lwm; 786 int arb_burst, arb_lwm;
787 int ret; 787 int ret;
788 788
789 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
790
791 /* no fb bound */
792 if (!atomic && !crtc->fb) {
793 NV_DEBUG_KMS(dev, "No FB bound\n");
794 return 0;
795 }
796
797
789 /* If atomic, we want to switch to the fb we were passed, so 798 /* If atomic, we want to switch to the fb we were passed, so
790 * now we update pointers to do that. (We don't pin; just 799 * now we update pointers to do that. (We don't pin; just
791 * assume we're already pinned and update the base address.) 800 * assume we're already pinned and update the base address.)
@@ -794,6 +803,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
794 drm_fb = passed_fb; 803 drm_fb = passed_fb;
795 fb = nouveau_framebuffer(passed_fb); 804 fb = nouveau_framebuffer(passed_fb);
796 } else { 805 } else {
806 drm_fb = crtc->fb;
807 fb = nouveau_framebuffer(crtc->fb);
797 /* If not atomic, we can go ahead and pin, and unpin the 808 /* If not atomic, we can go ahead and pin, and unpin the
798 * old fb we were passed. 809 * old fb we were passed.
799 */ 810 */
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 46ad59ea2185..5d989073ba6e 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -519,12 +519,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
519 struct drm_device *dev = nv_crtc->base.dev; 519 struct drm_device *dev = nv_crtc->base.dev;
520 struct drm_nouveau_private *dev_priv = dev->dev_private; 520 struct drm_nouveau_private *dev_priv = dev->dev_private;
521 struct nouveau_channel *evo = nv50_display(dev)->master; 521 struct nouveau_channel *evo = nv50_display(dev)->master;
522 struct drm_framebuffer *drm_fb = nv_crtc->base.fb; 522 struct drm_framebuffer *drm_fb;
523 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 523 struct nouveau_framebuffer *fb;
524 int ret; 524 int ret;
525 525
526 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 526 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
527 527
528 /* no fb bound */
529 if (!atomic && !crtc->fb) {
530 NV_DEBUG_KMS(dev, "No FB bound\n");
531 return 0;
532 }
533
528 /* If atomic, we want to switch to the fb we were passed, so 534 /* If atomic, we want to switch to the fb we were passed, so
529 * now we update pointers to do that. (We don't pin; just 535 * now we update pointers to do that. (We don't pin; just
530 * assume we're already pinned and update the base address.) 536 * assume we're already pinned and update the base address.)
@@ -533,6 +539,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
533 drm_fb = passed_fb; 539 drm_fb = passed_fb;
534 fb = nouveau_framebuffer(passed_fb); 540 fb = nouveau_framebuffer(passed_fb);
535 } else { 541 } else {
542 drm_fb = crtc->fb;
543 fb = nouveau_framebuffer(crtc->fb);
536 /* If not atomic, we can go ahead and pin, and unpin the 544 /* If not atomic, we can go ahead and pin, and unpin the
537 * old fb we were passed. 545 * old fb we were passed.
538 */ 546 */
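
Both nv04 and nv50 now bail out of the non-atomic mode_set_base path when no framebuffer is bound to the CRTC, instead of dereferencing a NULL crtc->fb; in the atomic page-flip path the passed-in fb is used as before. A minimal guard-clause sketch of that control flow with dummy types.

#include <stdbool.h>
#include <stdio.h>

struct framebuffer { const char *name; };
struct crtc { struct framebuffer *fb; };

static int do_mode_set_base(struct crtc *crtc, struct framebuffer *passed_fb,
                            bool atomic)
{
        struct framebuffer *fb;

        /* no fb bound: nothing to program in the non-atomic path */
        if (!atomic && !crtc->fb) {
                printf("No FB bound\n");
                return 0;
        }

        /* atomic flips hand us the fb; otherwise use the one on the crtc */
        fb = atomic ? passed_fb : crtc->fb;
        printf("scanning out %s\n", fb->name);
        return 0;
}

int main(void)
{
        struct framebuffer new_fb = { "new-fb" };
        struct crtc crtc = { NULL };

        do_mode_set_base(&crtc, NULL, false);      /* guarded, returns early */
        do_mode_set_base(&crtc, &new_fb, true);    /* atomic flip path */
        return 0;
}
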
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 645b84b3d203..4da23889fea6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
115 u8 msg[20]; 115 u8 msg[20];
116 int msg_bytes = send_bytes + 4; 116 int msg_bytes = send_bytes + 4;
117 u8 ack; 117 u8 ack;
118 unsigned retry;
118 119
119 if (send_bytes > 16) 120 if (send_bytes > 16)
120 return -1; 121 return -1;
@@ -125,20 +126,20 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
125 msg[3] = (msg_bytes << 4) | (send_bytes - 1); 126 msg[3] = (msg_bytes << 4) | (send_bytes - 1);
126 memcpy(&msg[4], send, send_bytes); 127 memcpy(&msg[4], send, send_bytes);
127 128
128 while (1) { 129 for (retry = 0; retry < 4; retry++) {
129 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, 130 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
130 msg, msg_bytes, NULL, 0, delay, &ack); 131 msg, msg_bytes, NULL, 0, delay, &ack);
131 if (ret < 0) 132 if (ret < 0)
132 return ret; 133 return ret;
133 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 134 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
134 break; 135 return send_bytes;
135 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 136 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
136 udelay(400); 137 udelay(400);
137 else 138 else
138 return -EIO; 139 return -EIO;
139 } 140 }
140 141
141 return send_bytes; 142 return -EIO;
142} 143}
143 144
144static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, 145static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
@@ -149,26 +150,29 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
149 int msg_bytes = 4; 150 int msg_bytes = 4;
150 u8 ack; 151 u8 ack;
151 int ret; 152 int ret;
153 unsigned retry;
152 154
153 msg[0] = address; 155 msg[0] = address;
154 msg[1] = address >> 8; 156 msg[1] = address >> 8;
155 msg[2] = AUX_NATIVE_READ << 4; 157 msg[2] = AUX_NATIVE_READ << 4;
156 msg[3] = (msg_bytes << 4) | (recv_bytes - 1); 158 msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
157 159
158 while (1) { 160 for (retry = 0; retry < 4; retry++) {
159 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, 161 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
160 msg, msg_bytes, recv, recv_bytes, delay, &ack); 162 msg, msg_bytes, recv, recv_bytes, delay, &ack);
161 if (ret == 0)
162 return -EPROTO;
163 if (ret < 0) 163 if (ret < 0)
164 return ret; 164 return ret;
165 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 165 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
166 return ret; 166 return ret;
167 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 167 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
168 udelay(400); 168 udelay(400);
169 else if (ret == 0)
170 return -EPROTO;
169 else 171 else
170 return -EIO; 172 return -EIO;
171 } 173 }
174
175 return -EIO;
172} 176}
173 177
174static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, 178static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
@@ -613,6 +617,18 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
613 return true; 617 return true;
614} 618}
615 619
620bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
621{
622 u8 link_status[DP_LINK_STATUS_SIZE];
623 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
624
625 if (!radeon_dp_get_link_status(radeon_connector, link_status))
626 return false;
627 if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
628 return false;
629 return true;
630}
631
616struct radeon_dp_link_train_info { 632struct radeon_dp_link_train_info {
617 struct radeon_device *rdev; 633 struct radeon_device *rdev;
618 struct drm_encoder *encoder; 634 struct drm_encoder *encoder;
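
Both AUX helpers swap their unbounded while(1) loops for four attempts: ACK returns success, DEFER waits ~400 µs and retries, anything else is -EIO, and exhausting the retries is also -EIO (with the zero-length-reply check on the read side moved after the ACK/DEFER handling). A user-space sketch of that retry policy; aux_transfer() is a stand-in for radeon_process_aux_ch().

#include <errno.h>
#include <stdio.h>

#define REPLY_ACK   0
#define REPLY_NACK  1
#define REPLY_DEFER 2

/* Stand-in for the real AUX channel: defers twice, then ACKs. */
static int aux_transfer(int attempt, int *reply)
{
        *reply = (attempt < 2) ? REPLY_DEFER : REPLY_ACK;
        return 4;                          /* bytes transferred */
}

static int aux_native_read(void)
{
        for (unsigned retry = 0; retry < 4; retry++) {
                int reply;
                int ret = aux_transfer(retry, &reply);

                if (ret < 0)
                        return ret;        /* channel error */
                if (reply == REPLY_ACK)
                        return ret;        /* done */
                if (reply == REPLY_DEFER)
                        continue;          /* real code: udelay(400) first */
                if (ret == 0)
                        return -EPROTO;    /* empty, non-deferred reply */
                return -EIO;               /* NACK or garbage */
        }
        return -EIO;                       /* gave up after 4 tries */
}

int main(void)
{
        printf("aux read: %d\n", aux_native_read());   /* 4 on success */
        return 0;
}
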
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 14dce9f22172..c4ffa14fb2f4 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -41,6 +41,31 @@ static void evergreen_gpu_init(struct radeon_device *rdev);
41void evergreen_fini(struct radeon_device *rdev); 41void evergreen_fini(struct radeon_device *rdev);
42static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); 42static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
43 43
44void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
45{
46 u16 ctl, v;
47 int cap, err;
48
49 cap = pci_pcie_cap(rdev->pdev);
50 if (!cap)
51 return;
52
53 err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
54 if (err)
55 return;
56
57 v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
58
59 /* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
60 * to avoid hangs or performance issues
61 */
62 if ((v == 0) || (v == 6) || (v == 7)) {
63 ctl &= ~PCI_EXP_DEVCTL_READRQ;
64 ctl |= (2 << 12);
65 pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
66 }
67}
68
44void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) 69void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
45{ 70{
46 /* enable the pflip int */ 71 /* enable the pflip int */
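
The new helper reads the PCIe Device Control register, extracts the Max_Read_Request_Size field (bits 14:12), and rewrites it to encoding 2 when it holds 0 or one of the reserved encodings 6/7. Per the PCIe encoding a field value v means 128 << v bytes, so 2 is 512 bytes; that interpretation is mine, while the fixup logic below follows the hunk.

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_DEVCTL_READRQ 0x7000   /* Max_Read_Request_Size, bits 14:12 */

/* Clamp an invalid/problematic MaxReadReq encoding to 2 (512 bytes). */
static uint16_t fix_max_read_req_size(uint16_t devctl)
{
        uint16_t v = (devctl & PCI_EXP_DEVCTL_READRQ) >> 12;

        if (v == 0 || v == 6 || v == 7) {
                devctl &= ~PCI_EXP_DEVCTL_READRQ;
                devctl |= 2 << 12;
        }
        return devctl;
}

int main(void)
{
        uint16_t before = 0x6000;      /* field = 6: reserved encoding */
        uint16_t after  = fix_max_read_req_size(before);

        printf("devctl 0x%04x -> 0x%04x (%u-byte reads)\n",
               before, after, 128u << ((after >> 12) & 0x7));
        return 0;
}
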
@@ -743,7 +768,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
743 !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || 768 !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
744 !evergreen_check_latency_hiding(&wm) || 769 !evergreen_check_latency_hiding(&wm) ||
745 (rdev->disp_priority == 2)) { 770 (rdev->disp_priority == 2)) {
746 DRM_INFO("force priority to high\n"); 771 DRM_DEBUG_KMS("force priority to high\n");
747 priority_a_cnt |= PRIORITY_ALWAYS_ON; 772 priority_a_cnt |= PRIORITY_ALWAYS_ON;
748 priority_b_cnt |= PRIORITY_ALWAYS_ON; 773 priority_b_cnt |= PRIORITY_ALWAYS_ON;
749 } 774 }
@@ -1357,6 +1382,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1357 SOFT_RESET_PA | 1382 SOFT_RESET_PA |
1358 SOFT_RESET_SH | 1383 SOFT_RESET_SH |
1359 SOFT_RESET_VGT | 1384 SOFT_RESET_VGT |
1385 SOFT_RESET_SPI |
1360 SOFT_RESET_SX)); 1386 SOFT_RESET_SX));
1361 RREG32(GRBM_SOFT_RESET); 1387 RREG32(GRBM_SOFT_RESET);
1362 mdelay(15); 1388 mdelay(15);
@@ -1378,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1378 /* Initialize the ring buffer's read and write pointers */ 1404 /* Initialize the ring buffer's read and write pointers */
1379 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 1405 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1380 WREG32(CP_RB_RPTR_WR, 0); 1406 WREG32(CP_RB_RPTR_WR, 0);
1381 WREG32(CP_RB_WPTR, 0); 1407 rdev->cp.wptr = 0;
1408 WREG32(CP_RB_WPTR, rdev->cp.wptr);
1382 1409
1383 /* set the wb address wether it's enabled or not */ 1410 /* set the wb address wether it's enabled or not */
1384 WREG32(CP_RB_RPTR_ADDR, 1411 WREG32(CP_RB_RPTR_ADDR,
@@ -1400,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1400 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 1427 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1401 1428
1402 rdev->cp.rptr = RREG32(CP_RB_RPTR); 1429 rdev->cp.rptr = RREG32(CP_RB_RPTR);
1403 rdev->cp.wptr = RREG32(CP_RB_WPTR);
1404 1430
1405 evergreen_cp_start(rdev); 1431 evergreen_cp_start(rdev);
1406 rdev->cp.ready = true; 1432 rdev->cp.ready = true;
@@ -1564,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1564 return backend_map; 1590 return backend_map;
1565} 1591}
1566 1592
1567static void evergreen_program_channel_remap(struct radeon_device *rdev)
1568{
1569 u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
1570
1571 tmp = RREG32(MC_SHARED_CHMAP);
1572 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1573 case 0:
1574 case 1:
1575 case 2:
1576 case 3:
1577 default:
1578 /* default mapping */
1579 mc_shared_chremap = 0x00fac688;
1580 break;
1581 }
1582
1583 switch (rdev->family) {
1584 case CHIP_HEMLOCK:
1585 case CHIP_CYPRESS:
1586 case CHIP_BARTS:
1587 tcp_chan_steer_lo = 0x54763210;
1588 tcp_chan_steer_hi = 0x0000ba98;
1589 break;
1590 case CHIP_JUNIPER:
1591 case CHIP_REDWOOD:
1592 case CHIP_CEDAR:
1593 case CHIP_PALM:
1594 case CHIP_SUMO:
1595 case CHIP_SUMO2:
1596 case CHIP_TURKS:
1597 case CHIP_CAICOS:
1598 default:
1599 tcp_chan_steer_lo = 0x76543210;
1600 tcp_chan_steer_hi = 0x0000ba98;
1601 break;
1602 }
1603
1604 WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
1605 WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
1606 WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
1607}
1608
1609static void evergreen_gpu_init(struct radeon_device *rdev) 1593static void evergreen_gpu_init(struct radeon_device *rdev)
1610{ 1594{
1611 u32 cc_rb_backend_disable = 0; 1595 u32 cc_rb_backend_disable = 0;
@@ -1862,6 +1846,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1862 1846
1863 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 1847 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1864 1848
1849 evergreen_fix_pci_max_read_req_size(rdev);
1850
1865 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; 1851 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
1866 1852
1867 cc_gc_shader_pipe_config |= 1853 cc_gc_shader_pipe_config |=
@@ -2050,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2050 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 2036 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2051 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 2037 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2052 2038
2053 evergreen_program_channel_remap(rdev);
2054
2055 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; 2039 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
2056 grbm_gfx_index = INSTANCE_BROADCAST_WRITES; 2040 grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
2057 2041
@@ -3143,21 +3127,23 @@ int evergreen_suspend(struct radeon_device *rdev)
3143} 3127}
3144 3128
3145int evergreen_copy_blit(struct radeon_device *rdev, 3129int evergreen_copy_blit(struct radeon_device *rdev,
3146 uint64_t src_offset, uint64_t dst_offset, 3130 uint64_t src_offset,
3147 unsigned num_pages, struct radeon_fence *fence) 3131 uint64_t dst_offset,
3132 unsigned num_gpu_pages,
3133 struct radeon_fence *fence)
3148{ 3134{
3149 int r; 3135 int r;
3150 3136
3151 mutex_lock(&rdev->r600_blit.mutex); 3137 mutex_lock(&rdev->r600_blit.mutex);
3152 rdev->r600_blit.vb_ib = NULL; 3138 rdev->r600_blit.vb_ib = NULL;
3153 r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); 3139 r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
3154 if (r) { 3140 if (r) {
3155 if (rdev->r600_blit.vb_ib) 3141 if (rdev->r600_blit.vb_ib)
3156 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 3142 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
3157 mutex_unlock(&rdev->r600_blit.mutex); 3143 mutex_unlock(&rdev->r600_blit.mutex);
3158 return r; 3144 return r;
3159 } 3145 }
3160 evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); 3146 evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
3161 evergreen_blit_done_copy(rdev, fence); 3147 evergreen_blit_done_copy(rdev, fence);
3162 mutex_unlock(&rdev->r600_blit.mutex); 3148 mutex_unlock(&rdev->r600_blit.mutex);
3163 return 0; 3149 return 0;
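
The blit-copy path now counts GPU pages rather than CPU pages, so the byte count handed to the blitter is num_gpu_pages * RADEON_GPU_PAGE_SIZE, and the r100 blit further down switches its stride from PAGE_SIZE to the GPU page size as well. As far as I know RADEON_GPU_PAGE_SIZE is 4096 in this driver, while PAGE_SIZE can be larger (64 KiB kernels exist), which is presumably what the rename guards against. A tiny sketch of the conversion.

#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE 4096u          /* GPU page, independent of CPU */

static unsigned long long copy_bytes(unsigned num_gpu_pages)
{
        /* size handed to the blitter: GPU pages, not CPU PAGE_SIZE pages */
        return (unsigned long long)num_gpu_pages * RADEON_GPU_PAGE_SIZE;
}

int main(void)
{
        unsigned cpu_page_size = 65536;      /* e.g. a 64 KiB-page kernel */
        unsigned num_gpu_pages = cpu_page_size / RADEON_GPU_PAGE_SIZE;

        printf("one 64 KiB CPU page = %u GPU pages = %llu bytes\n",
               num_gpu_pages, copy_bytes(num_gpu_pages));
        return 0;
}
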
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 44c4750f4518..8c79ca97753d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -39,6 +39,7 @@ extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
39extern void evergreen_mc_program(struct radeon_device *rdev); 39extern void evergreen_mc_program(struct radeon_device *rdev);
40extern void evergreen_irq_suspend(struct radeon_device *rdev); 40extern void evergreen_irq_suspend(struct radeon_device *rdev);
41extern int evergreen_mc_init(struct radeon_device *rdev); 41extern int evergreen_mc_init(struct radeon_device *rdev);
42extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
42 43
43#define EVERGREEN_PFP_UCODE_SIZE 1120 44#define EVERGREEN_PFP_UCODE_SIZE 1120
44#define EVERGREEN_PM4_UCODE_SIZE 1376 45#define EVERGREEN_PM4_UCODE_SIZE 1376
@@ -568,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
568 return backend_map; 569 return backend_map;
569} 570}
570 571
571static void cayman_program_channel_remap(struct radeon_device *rdev)
572{
573 u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
574
575 tmp = RREG32(MC_SHARED_CHMAP);
576 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
577 case 0:
578 case 1:
579 case 2:
580 case 3:
581 default:
582 /* default mapping */
583 mc_shared_chremap = 0x00fac688;
584 break;
585 }
586
587 switch (rdev->family) {
588 case CHIP_CAYMAN:
589 default:
590 //tcp_chan_steer_lo = 0x54763210
591 tcp_chan_steer_lo = 0x76543210;
592 tcp_chan_steer_hi = 0x0000ba98;
593 break;
594 }
595
596 WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
597 WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
598 WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
599}
600
601static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, 572static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
602 u32 disable_mask_per_se, 573 u32 disable_mask_per_se,
603 u32 max_disable_mask_per_se, 574 u32 max_disable_mask_per_se,
@@ -669,6 +640,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
669 640
670 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 641 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
671 642
643 evergreen_fix_pci_max_read_req_size(rdev);
644
672 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 645 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
673 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 646 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
674 647
@@ -839,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
839 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 812 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
840 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 813 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
841 814
842 cayman_program_channel_remap(rdev);
843
844 /* primary versions */ 815 /* primary versions */
845 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 816 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
846 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 817 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
@@ -1159,6 +1130,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
1159 SOFT_RESET_PA | 1130 SOFT_RESET_PA |
1160 SOFT_RESET_SH | 1131 SOFT_RESET_SH |
1161 SOFT_RESET_VGT | 1132 SOFT_RESET_VGT |
1133 SOFT_RESET_SPI |
1162 SOFT_RESET_SX)); 1134 SOFT_RESET_SX));
1163 RREG32(GRBM_SOFT_RESET); 1135 RREG32(GRBM_SOFT_RESET);
1164 mdelay(15); 1136 mdelay(15);
@@ -1183,7 +1155,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
1183 1155
1184 /* Initialize the ring buffer's read and write pointers */ 1156 /* Initialize the ring buffer's read and write pointers */
1185 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); 1157 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
1186 WREG32(CP_RB0_WPTR, 0); 1158 rdev->cp.wptr = 0;
1159 WREG32(CP_RB0_WPTR, rdev->cp.wptr);
1187 1160
1188 /* set the wb address wether it's enabled or not */ 1161 /* set the wb address wether it's enabled or not */
1189 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); 1162 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1203,7 +1176,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
1203 WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8); 1176 WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
1204 1177
1205 rdev->cp.rptr = RREG32(CP_RB0_RPTR); 1178 rdev->cp.rptr = RREG32(CP_RB0_RPTR);
1206 rdev->cp.wptr = RREG32(CP_RB0_WPTR);
1207 1179
1208 /* ring1 - compute only */ 1180 /* ring1 - compute only */
1209 /* Set ring buffer size */ 1181 /* Set ring buffer size */
@@ -1216,7 +1188,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
1216 1188
1217 /* Initialize the ring buffer's read and write pointers */ 1189 /* Initialize the ring buffer's read and write pointers */
1218 WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); 1190 WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
1219 WREG32(CP_RB1_WPTR, 0); 1191 rdev->cp1.wptr = 0;
1192 WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
1220 1193
1221 /* set the wb address whether it's enabled or not */ 1194 /* set the wb address whether it's enabled or not */
1222 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); 1195 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1228,7 +1201,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
1228 WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8); 1201 WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
1229 1202
1230 rdev->cp1.rptr = RREG32(CP_RB1_RPTR); 1203 rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
1231 rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
1232 1204
1233 /* ring2 - compute only */ 1205 /* ring2 - compute only */
1234 /* Set ring buffer size */ 1206 /* Set ring buffer size */
@@ -1241,7 +1213,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
1241 1213
1242 /* Initialize the ring buffer's read and write pointers */ 1214 /* Initialize the ring buffer's read and write pointers */
1243 WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); 1215 WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
1244 WREG32(CP_RB2_WPTR, 0); 1216 rdev->cp2.wptr = 0;
1217 WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
1245 1218
1246 /* set the wb address whether it's enabled or not */ 1219 /* set the wb address whether it's enabled or not */
1247 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); 1220 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1253,7 +1226,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
1253 WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8); 1226 WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
1254 1227
1255 rdev->cp2.rptr = RREG32(CP_RB2_RPTR); 1228 rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
1256 rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
1257 1229
1258 /* start the rings */ 1230 /* start the rings */
1259 cayman_cp_start(rdev); 1231 cayman_cp_start(rdev);
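The ring-buffer hunks above (CP_RB0/1/2 in cayman_cp_resume, mirrored in the r100 and r600 hunks below) all apply the same pattern: the driver zeroes its software copy of the write pointer and programs the register from that copy, instead of writing zero and later reading the register back. A minimal user-space sketch of that ordering follows; the register macros, fake_regs array and the ring struct are stand-ins for the driver's real MMIO helpers and rdev->cp, not its actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver's MMIO helpers and ring state. */
static uint32_t fake_regs[2];
#define CP_RB_WPTR 0
#define CP_RB_RPTR 1
#define WREG32(reg, val) (fake_regs[(reg)] = (val))
#define RREG32(reg)      (fake_regs[(reg)])

struct ring {
    uint32_t wptr;  /* software copy, owned by the driver */
    uint32_t rptr;  /* software copy, refreshed from hardware */
};

static void ring_reset_pointers(struct ring *ring)
{
    /* Zero the software copy first, then program the hardware from it. */
    ring->wptr = 0;
    WREG32(CP_RB_WPTR, ring->wptr);

    /* Only the read pointer is still taken from the hardware; the write
     * pointer is never read back, so a flaky register on resume cannot
     * desynchronize the software state. */
    ring->rptr = RREG32(CP_RB_RPTR);
}

int main(void)
{
    struct ring ring = { .wptr = 0xdeadbeef, .rptr = 0 };

    fake_regs[CP_RB_RPTR] = 0;  /* hardware reports an idle ring */
    ring_reset_pointers(&ring);
    printf("wptr=%u rptr=%u\n", (unsigned)ring.wptr, (unsigned)ring.rptr);
    return 0;
}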
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f2204cb1ccdf..7fcdbbbf2979 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
721int r100_copy_blit(struct radeon_device *rdev, 721int r100_copy_blit(struct radeon_device *rdev,
722 uint64_t src_offset, 722 uint64_t src_offset,
723 uint64_t dst_offset, 723 uint64_t dst_offset,
724 unsigned num_pages, 724 unsigned num_gpu_pages,
725 struct radeon_fence *fence) 725 struct radeon_fence *fence)
726{ 726{
727 uint32_t cur_pages; 727 uint32_t cur_pages;
728 uint32_t stride_bytes = PAGE_SIZE; 728 uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
729 uint32_t pitch; 729 uint32_t pitch;
730 uint32_t stride_pixels; 730 uint32_t stride_pixels;
731 unsigned ndw; 731 unsigned ndw;
@@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,
737 /* radeon pitch is /64 */ 737 /* radeon pitch is /64 */
738 pitch = stride_bytes / 64; 738 pitch = stride_bytes / 64;
739 stride_pixels = stride_bytes / 4; 739 stride_pixels = stride_bytes / 4;
740 num_loops = DIV_ROUND_UP(num_pages, 8191); 740 num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
741 741
742 /* Ask for enough room for blit + flush + fence */ 742 /* Ask for enough room for blit + flush + fence */
743 ndw = 64 + (10 * num_loops); 743 ndw = 64 + (10 * num_loops);
@@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,
746 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); 746 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
747 return -EINVAL; 747 return -EINVAL;
748 } 748 }
749 while (num_pages > 0) { 749 while (num_gpu_pages > 0) {
750 cur_pages = num_pages; 750 cur_pages = num_gpu_pages;
751 if (cur_pages > 8191) { 751 if (cur_pages > 8191) {
752 cur_pages = 8191; 752 cur_pages = 8191;
753 } 753 }
754 num_pages -= cur_pages; 754 num_gpu_pages -= cur_pages;
755 755
756 /* pages are in Y direction - height 756 /* pages are in Y direction - height
757 page width in X direction - width */ 757 page width in X direction - width */
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,
773 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 773 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
774 radeon_ring_write(rdev, 0); 774 radeon_ring_write(rdev, 0);
775 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 775 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
776 radeon_ring_write(rdev, num_pages); 776 radeon_ring_write(rdev, num_gpu_pages);
777 radeon_ring_write(rdev, num_pages); 777 radeon_ring_write(rdev, num_gpu_pages);
778 radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); 778 radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
779 } 779 }
780 radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); 780 radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
@@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
990 /* Force read & write ptr to 0 */ 990 /* Force read & write ptr to 0 */
991 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); 991 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
992 WREG32(RADEON_CP_RB_RPTR_WR, 0); 992 WREG32(RADEON_CP_RB_RPTR_WR, 0);
993 WREG32(RADEON_CP_RB_WPTR, 0); 993 rdev->cp.wptr = 0;
994 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
994 995
995 /* set the wb address whether it's enabled or not */ 996 /* set the wb address whether it's enabled or not */
996 WREG32(R_00070C_CP_RB_RPTR_ADDR, 997 WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1007 WREG32(RADEON_CP_RB_CNTL, tmp); 1008 WREG32(RADEON_CP_RB_CNTL, tmp);
1008 udelay(10); 1009 udelay(10);
1009 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); 1010 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
1010 rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
1011 /* protect against crazy HW on resume */
1012 rdev->cp.wptr &= rdev->cp.ptr_mask;
1013 /* Set cp mode to bus mastering & enable cp*/ 1011 /* Set cp mode to bus mastering & enable cp*/
1014 WREG32(RADEON_CP_CSQ_MODE, 1012 WREG32(RADEON_CP_CSQ_MODE,
1015 REG_SET(RADEON_INDIRECT2_START, indirect2_start) | 1013 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
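r100_copy_blit() above now counts 4 KiB GPU pages rather than CPU pages and consumes them in blocks of at most 8191 per blit packet. A stand-alone sketch of that chunking arithmetic, reusing the constants from the hunk but with a hypothetical helper name:

#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE  4096u
#define MAX_PAGES_PER_BLIT    8191u  /* per-packet limit in r100_copy_blit() */
#define DIV_ROUND_UP(n, d)    (((n) + (d) - 1) / (d))

/* Hypothetical helper: print the blit packets a copy would be split into. */
static void emit_blit_chunks(unsigned int num_gpu_pages)
{
    unsigned int num_loops = DIV_ROUND_UP(num_gpu_pages, MAX_PAGES_PER_BLIT);
    unsigned int ndw = 64 + 10 * num_loops;  /* blit + flush + fence, as in the driver */

    printf("%u dwords of ring space for %u loop(s)\n", ndw, num_loops);

    while (num_gpu_pages > 0) {
        unsigned int cur = num_gpu_pages;

        if (cur > MAX_PAGES_PER_BLIT)
            cur = MAX_PAGES_PER_BLIT;
        num_gpu_pages -= cur;
        printf("  blit of %u GPU pages (%u bytes)\n",
               cur, cur * RADEON_GPU_PAGE_SIZE);
    }
}

int main(void)
{
    emit_blit_chunks(20000);  /* roughly a 78 MiB move: 3 loops */
    return 0;
}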
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index f24058300413..a1f3ba063c2d 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
84int r200_copy_dma(struct radeon_device *rdev, 84int r200_copy_dma(struct radeon_device *rdev,
85 uint64_t src_offset, 85 uint64_t src_offset,
86 uint64_t dst_offset, 86 uint64_t dst_offset,
87 unsigned num_pages, 87 unsigned num_gpu_pages,
88 struct radeon_fence *fence) 88 struct radeon_fence *fence)
89{ 89{
90 uint32_t size; 90 uint32_t size;
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,
93 int r = 0; 93 int r = 0;
94 94
95 /* radeon pitch is /64 */ 95 /* radeon pitch is /64 */
96 size = num_pages << PAGE_SHIFT; 96 size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
97 num_loops = DIV_ROUND_UP(size, 0x1FFFFF); 97 num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
98 r = radeon_ring_lock(rdev, num_loops * 4 + 64); 98 r = radeon_ring_lock(rdev, num_loops * 4 + 64);
99 if (r) { 99 if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index aa5571b73aa0..720dd99163f8 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev)
2209 /* Initialize the ring buffer's read and write pointers */ 2209 /* Initialize the ring buffer's read and write pointers */
2210 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 2210 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2211 WREG32(CP_RB_RPTR_WR, 0); 2211 WREG32(CP_RB_RPTR_WR, 0);
2212 WREG32(CP_RB_WPTR, 0); 2212 rdev->cp.wptr = 0;
2213 WREG32(CP_RB_WPTR, rdev->cp.wptr);
2213 2214
2214 /* set the wb address whether it's enabled or not */ 2215 /* set the wb address whether it's enabled or not */
2215 WREG32(CP_RB_RPTR_ADDR, 2216 WREG32(CP_RB_RPTR_ADDR,
@@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev)
2231 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 2232 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2232 2233
2233 rdev->cp.rptr = RREG32(CP_RB_RPTR); 2234 rdev->cp.rptr = RREG32(CP_RB_RPTR);
2234 rdev->cp.wptr = RREG32(CP_RB_WPTR);
2235 2235
2236 r600_cp_start(rdev); 2236 r600_cp_start(rdev);
2237 rdev->cp.ready = true; 2237 rdev->cp.ready = true;
@@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
2353} 2353}
2354 2354
2355int r600_copy_blit(struct radeon_device *rdev, 2355int r600_copy_blit(struct radeon_device *rdev,
2356 uint64_t src_offset, uint64_t dst_offset, 2356 uint64_t src_offset,
2357 unsigned num_pages, struct radeon_fence *fence) 2357 uint64_t dst_offset,
2358 unsigned num_gpu_pages,
2359 struct radeon_fence *fence)
2358{ 2360{
2359 int r; 2361 int r;
2360 2362
2361 mutex_lock(&rdev->r600_blit.mutex); 2363 mutex_lock(&rdev->r600_blit.mutex);
2362 rdev->r600_blit.vb_ib = NULL; 2364 rdev->r600_blit.vb_ib = NULL;
2363 r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); 2365 r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
2364 if (r) { 2366 if (r) {
2365 if (rdev->r600_blit.vb_ib) 2367 if (rdev->r600_blit.vb_ib)
2366 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 2368 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2367 mutex_unlock(&rdev->r600_blit.mutex); 2369 mutex_unlock(&rdev->r600_blit.mutex);
2368 return r; 2370 return r;
2369 } 2371 }
2370 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); 2372 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
2371 r600_blit_done_copy(rdev, fence); 2373 r600_blit_done_copy(rdev, fence);
2372 mutex_unlock(&rdev->r600_blit.mutex); 2374 mutex_unlock(&rdev->r600_blit.mutex);
2373 return 0; 2375 return 0;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 32807baf55e2..c1e056b35b29 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -322,6 +322,7 @@ union radeon_gart_table {
322 322
323#define RADEON_GPU_PAGE_SIZE 4096 323#define RADEON_GPU_PAGE_SIZE 4096
324#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) 324#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
325#define RADEON_GPU_PAGE_SHIFT 12
325 326
326struct radeon_gart { 327struct radeon_gart {
327 dma_addr_t table_addr; 328 dma_addr_t table_addr;
@@ -914,17 +915,17 @@ struct radeon_asic {
914 int (*copy_blit)(struct radeon_device *rdev, 915 int (*copy_blit)(struct radeon_device *rdev,
915 uint64_t src_offset, 916 uint64_t src_offset,
916 uint64_t dst_offset, 917 uint64_t dst_offset,
917 unsigned num_pages, 918 unsigned num_gpu_pages,
918 struct radeon_fence *fence); 919 struct radeon_fence *fence);
919 int (*copy_dma)(struct radeon_device *rdev, 920 int (*copy_dma)(struct radeon_device *rdev,
920 uint64_t src_offset, 921 uint64_t src_offset,
921 uint64_t dst_offset, 922 uint64_t dst_offset,
922 unsigned num_pages, 923 unsigned num_gpu_pages,
923 struct radeon_fence *fence); 924 struct radeon_fence *fence);
924 int (*copy)(struct radeon_device *rdev, 925 int (*copy)(struct radeon_device *rdev,
925 uint64_t src_offset, 926 uint64_t src_offset,
926 uint64_t dst_offset, 927 uint64_t dst_offset,
927 unsigned num_pages, 928 unsigned num_gpu_pages,
928 struct radeon_fence *fence); 929 struct radeon_fence *fence);
929 uint32_t (*get_engine_clock)(struct radeon_device *rdev); 930 uint32_t (*get_engine_clock)(struct radeon_device *rdev);
930 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); 931 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
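radeon.h gains RADEON_GPU_PAGE_SHIFT (12), which pairs with the existing 4096-byte RADEON_GPU_PAGE_SIZE: r200_copy_dma() above now computes the byte count as num_gpu_pages << RADEON_GPU_PAGE_SHIFT instead of using the CPU's PAGE_SHIFT. A tiny sketch of the invariant, using only the macros from the header:

#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE  4096
#define RADEON_GPU_PAGE_MASK  (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12

int main(void)
{
    /* The shift and the size must describe the same page: 1 << 12 == 4096. */
    _Static_assert((1 << RADEON_GPU_PAGE_SHIFT) == RADEON_GPU_PAGE_SIZE,
                   "GPU page shift/size mismatch");

    unsigned int num_gpu_pages = 300;
    unsigned int size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;

    printf("%u GPU pages = %u bytes (offset mask 0x%x)\n",
           num_gpu_pages, size, RADEON_GPU_PAGE_MASK);
    return 0;
}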
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d7a0d7c6a9a..3dedaa07aac1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
75int r100_copy_blit(struct radeon_device *rdev, 75int r100_copy_blit(struct radeon_device *rdev,
76 uint64_t src_offset, 76 uint64_t src_offset,
77 uint64_t dst_offset, 77 uint64_t dst_offset,
78 unsigned num_pages, 78 unsigned num_gpu_pages,
79 struct radeon_fence *fence); 79 struct radeon_fence *fence);
80int r100_set_surface_reg(struct radeon_device *rdev, int reg, 80int r100_set_surface_reg(struct radeon_device *rdev, int reg,
81 uint32_t tiling_flags, uint32_t pitch, 81 uint32_t tiling_flags, uint32_t pitch,
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
143extern int r200_copy_dma(struct radeon_device *rdev, 143extern int r200_copy_dma(struct radeon_device *rdev,
144 uint64_t src_offset, 144 uint64_t src_offset,
145 uint64_t dst_offset, 145 uint64_t dst_offset,
146 unsigned num_pages, 146 unsigned num_gpu_pages,
147 struct radeon_fence *fence); 147 struct radeon_fence *fence);
148void r200_set_safe_registers(struct radeon_device *rdev); 148void r200_set_safe_registers(struct radeon_device *rdev);
149 149
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
311int r600_ring_test(struct radeon_device *rdev); 311int r600_ring_test(struct radeon_device *rdev);
312int r600_copy_blit(struct radeon_device *rdev, 312int r600_copy_blit(struct radeon_device *rdev,
313 uint64_t src_offset, uint64_t dst_offset, 313 uint64_t src_offset, uint64_t dst_offset,
314 unsigned num_pages, struct radeon_fence *fence); 314 unsigned num_gpu_pages, struct radeon_fence *fence);
315void r600_hpd_init(struct radeon_device *rdev); 315void r600_hpd_init(struct radeon_device *rdev);
316void r600_hpd_fini(struct radeon_device *rdev); 316void r600_hpd_fini(struct radeon_device *rdev);
317bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 317bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);
403void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 403void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
404int evergreen_copy_blit(struct radeon_device *rdev, 404int evergreen_copy_blit(struct radeon_device *rdev,
405 uint64_t src_offset, uint64_t dst_offset, 405 uint64_t src_offset, uint64_t dst_offset,
406 unsigned num_pages, struct radeon_fence *fence); 406 unsigned num_gpu_pages, struct radeon_fence *fence);
407void evergreen_hpd_init(struct radeon_device *rdev); 407void evergreen_hpd_init(struct radeon_device *rdev);
408void evergreen_hpd_fini(struct radeon_device *rdev); 408void evergreen_hpd_fini(struct radeon_device *rdev);
409bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 409bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index dcd0863e31ae..b6e18c8db9f5 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -219,6 +219,9 @@ void radeon_get_clock_info(struct drm_device *dev)
219 } else { 219 } else {
220 DRM_INFO("Using generic clock info\n"); 220 DRM_INFO("Using generic clock info\n");
221 221
222 /* may need to be per card */
223 rdev->clock.max_pixel_clock = 35000;
224
222 if (rdev->flags & RADEON_IS_IGP) { 225 if (rdev->flags & RADEON_IS_IGP) {
223 p1pll->reference_freq = 1432; 226 p1pll->reference_freq = 1432;
224 p2pll->reference_freq = 1432; 227 p2pll->reference_freq = 1432;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e0138b674aca..63675241c7ff 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3298,6 +3298,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
3298 rdev->pdev->subsystem_device == 0x30a4) 3298 rdev->pdev->subsystem_device == 0x30a4)
3299 return; 3299 return;
3300 3300
3301 /* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume
3302 * - it hangs on resume inside the dynclk 1 table.
3303 */
3304 if (rdev->family == CHIP_RS480 &&
3305 rdev->pdev->subsystem_vendor == 0x103c &&
3306 rdev->pdev->subsystem_device == 0x30ae)
3307 return;
3308
3301 /* DYN CLK 1 */ 3309 /* DYN CLK 1 */
3302 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); 3310 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
3303 if (table) 3311 if (table)
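The radeon_combios_asic_init() hunk above adds another resume quirk keyed on chip family plus PCI subsystem vendor/device, bailing out before the dynclk table that hangs. A hedged, table-driven sketch of the same check; the struct, the CHIP_RS480 value and the helper name are invented for illustration, only the 0x103c/0x30ae entry comes from the hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified device identity; the driver reads these from
 * rdev->family and rdev->pdev instead. */
struct dev_id {
    int      family;
    uint16_t subsystem_vendor;
    uint16_t subsystem_device;
};

#define CHIP_RS480 1  /* stand-in value */

/* Systems whose dynclk table hangs on resume, so asic_init must skip it. */
static const struct dev_id dynclk_quirks[] = {
    { CHIP_RS480, 0x103c, 0x30ae },  /* Compaq Presario V5245EU, from the hunk */
    /* further entries would follow the same shape */
};

static bool skip_dynclk_table(const struct dev_id *id)
{
    for (size_t i = 0; i < sizeof(dynclk_quirks) / sizeof(dynclk_quirks[0]); i++) {
        if (dynclk_quirks[i].family == id->family &&
            dynclk_quirks[i].subsystem_vendor == id->subsystem_vendor &&
            dynclk_quirks[i].subsystem_device == id->subsystem_device)
            return true;
    }
    return false;
}

int main(void)
{
    struct dev_id laptop = { CHIP_RS480, 0x103c, 0x30ae };

    printf("skip dynclk table: %s\n", skip_dynclk_table(&laptop) ? "yes" : "no");
    return 0;
}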
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 6d6b5f16bc09..bce63fd329d4 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -60,18 +60,20 @@ void radeon_connector_hotplug(struct drm_connector *connector)
60 60
61 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 61 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
62 62
63 /* powering up/down the eDP panel generates hpd events which 63 /* if the connector is already off, don't turn it back on */
64 * can interfere with modesetting. 64 if (connector->dpms != DRM_MODE_DPMS_ON)
65 */
66 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
67 return; 65 return;
68 66
69 /* pre-r600 did not always have the hpd pins mapped accurately to connectors */ 67 /* just deal with DP (not eDP) here. */
70 if (rdev->family >= CHIP_R600) { 68 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
71 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) 69 int saved_dpms = connector->dpms;
72 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 70
73 else 71 /* Only turn off the display if it's physically disconnected */
72 if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
74 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 73 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
74 else if (radeon_dp_needs_link_train(radeon_connector))
75 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
76 connector->dpms = saved_dpms;
75 } 77 }
76} 78}
77 79
@@ -464,6 +466,16 @@ static bool radeon_connector_needs_extended_probe(struct radeon_device *dev,
464 (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) 466 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
465 return true; 467 return true;
466 } 468 }
469 /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100
470 * (RS690M) sends data to i2c bus for a HDMI connector that
471 * is not implemented */
472 if ((dev->pdev->device == 0x791f) &&
473 (dev->pdev->subsystem_vendor == 0x1179) &&
474 (dev->pdev->subsystem_device == 0xff68)) {
475 if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
476 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
477 return true;
478 }
467 479
468 /* Default: no EDID header probe required for DDC probing */ 480 /* Default: no EDID header probe required for DDC probing */
469 return false; 481 return false;
@@ -474,11 +486,19 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
474{ 486{
475 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 487 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
476 struct drm_display_mode *native_mode = &radeon_encoder->native_mode; 488 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
489 struct drm_display_mode *t, *mode;
490
491 /* If the EDID preferred mode doesn't match the native mode, use it */
492 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
493 if (mode->type & DRM_MODE_TYPE_PREFERRED) {
494 if (mode->hdisplay != native_mode->hdisplay ||
495 mode->vdisplay != native_mode->vdisplay)
496 memcpy(native_mode, mode, sizeof(*mode));
497 }
498 }
477 499
478 /* Try to get native mode details from EDID if necessary */ 500 /* Try to get native mode details from EDID if necessary */
479 if (!native_mode->clock) { 501 if (!native_mode->clock) {
480 struct drm_display_mode *t, *mode;
481
482 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { 502 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
483 if (mode->hdisplay == native_mode->hdisplay && 503 if (mode->hdisplay == native_mode->hdisplay &&
484 mode->vdisplay == native_mode->vdisplay) { 504 mode->vdisplay == native_mode->vdisplay) {
@@ -489,6 +509,7 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
489 } 509 }
490 } 510 }
491 } 511 }
512
492 if (!native_mode->clock) { 513 if (!native_mode->clock) {
493 DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); 514 DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
494 radeon_encoder->rmx_type = RMX_OFF; 515 radeon_encoder->rmx_type = RMX_OFF;
@@ -1276,12 +1297,33 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1276 if (!radeon_dig_connector->edp_on) 1297 if (!radeon_dig_connector->edp_on)
1277 atombios_set_edp_panel_power(connector, 1298 atombios_set_edp_panel_power(connector,
1278 ATOM_TRANSMITTER_ACTION_POWER_OFF); 1299 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1279 } else { 1300 } else if (radeon_connector_encoder_is_dp_bridge(connector)) {
1280 /* need to setup ddc on the bridge */ 1301 /* DP bridges are always DP */
1281 if (radeon_connector_encoder_is_dp_bridge(connector)) { 1302 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
1303 /* get the DPCD from the bridge */
1304 radeon_dp_getdpcd(radeon_connector);
1305
1306 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
1307 ret = connector_status_connected;
1308 else {
1309 /* need to setup ddc on the bridge */
1282 if (encoder) 1310 if (encoder)
1283 radeon_atom_ext_encoder_setup_ddc(encoder); 1311 radeon_atom_ext_encoder_setup_ddc(encoder);
1312 if (radeon_ddc_probe(radeon_connector,
1313 radeon_connector->requires_extended_probe))
1314 ret = connector_status_connected;
1284 } 1315 }
1316
1317 if ((ret == connector_status_disconnected) &&
1318 radeon_connector->dac_load_detect) {
1319 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1320 struct drm_encoder_helper_funcs *encoder_funcs;
1321 if (encoder) {
1322 encoder_funcs = encoder->helper_private;
1323 ret = encoder_funcs->detect(encoder, connector);
1324 }
1325 }
1326 } else {
1285 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); 1327 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
1286 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { 1328 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
1287 ret = connector_status_connected; 1329 ret = connector_status_connected;
@@ -1297,16 +1339,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1297 ret = connector_status_connected; 1339 ret = connector_status_connected;
1298 } 1340 }
1299 } 1341 }
1300
1301 if ((ret == connector_status_disconnected) &&
1302 radeon_connector->dac_load_detect) {
1303 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1304 struct drm_encoder_helper_funcs *encoder_funcs;
1305 if (encoder) {
1306 encoder_funcs = encoder->helper_private;
1307 ret = encoder_funcs->detect(encoder, connector);
1308 }
1309 }
1310 } 1342 }
1311 1343
1312 radeon_connector_update_scratch_regs(connector, ret); 1344 radeon_connector_update_scratch_regs(connector, ret);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 3189a7efb2e9..fde25c0d65a0 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
208 int xorigin = 0, yorigin = 0; 208 int xorigin = 0, yorigin = 0;
209 int w = radeon_crtc->cursor_width; 209 int w = radeon_crtc->cursor_width;
210 210
211 if (x < 0)
212 xorigin = -x + 1;
213 if (y < 0)
214 yorigin = -y + 1;
215 if (xorigin >= CURSOR_WIDTH)
216 xorigin = CURSOR_WIDTH - 1;
217 if (yorigin >= CURSOR_HEIGHT)
218 yorigin = CURSOR_HEIGHT - 1;
219
220 if (ASIC_IS_AVIVO(rdev)) { 211 if (ASIC_IS_AVIVO(rdev)) {
221 int i = 0;
222 struct drm_crtc *crtc_p;
223
224 /* avivo cursors are offset into the total surface */ 212 /* avivo cursors are offset into the total surface */
225 x += crtc->x; 213 x += crtc->x;
226 y += crtc->y; 214 y += crtc->y;
227 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 215 }
216 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
217
218 if (x < 0) {
219 xorigin = min(-x, CURSOR_WIDTH - 1);
220 x = 0;
221 }
222 if (y < 0) {
223 yorigin = min(-y, CURSOR_HEIGHT - 1);
224 y = 0;
225 }
226
227 if (ASIC_IS_AVIVO(rdev)) {
228 int i = 0;
229 struct drm_crtc *crtc_p;
228 230
229 /* avivo cursor image can't end on 128 pixel boundary or 231 /* avivo cursor image can't end on 128 pixel boundary or
230 * go past the end of the frame if both crtcs are enabled 232 * go past the end of the frame if both crtcs are enabled
@@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
253 255
254 radeon_lock_cursor(crtc, true); 256 radeon_lock_cursor(crtc, true);
255 if (ASIC_IS_DCE4(rdev)) { 257 if (ASIC_IS_DCE4(rdev)) {
256 WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, 258 WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
257 ((xorigin ? 0 : x) << 16) |
258 (yorigin ? 0 : y));
259 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); 259 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
260 WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, 260 WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
261 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); 261 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
262 } else if (ASIC_IS_AVIVO(rdev)) { 262 } else if (ASIC_IS_AVIVO(rdev)) {
263 WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, 263 WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
264 ((xorigin ? 0 : x) << 16) |
265 (yorigin ? 0 : y));
266 WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); 264 WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
267 WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, 265 WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
268 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); 266 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
@@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
276 | yorigin)); 274 | yorigin));
277 WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, 275 WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
278 (RADEON_CUR_LOCK 276 (RADEON_CUR_LOCK
279 | ((xorigin ? 0 : x) << 16) 277 | (x << 16)
280 | (yorigin ? 0 : y))); 278 | y));
281 /* offset is from DISP(2)_BASE_ADDRESS */ 279 /* offset is from DISP(2)_BASE_ADDRESS */
282 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + 280 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
283 (yorigin * 256))); 281 (yorigin * 256)));
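radeon_crtc_cursor_move() above now clamps the hotspot with min(-x, CURSOR_WIDTH - 1) and zeroes the coordinate, rather than writing conditional values into the position registers. A small sketch of that clamping; the CURSOR_WIDTH/HEIGHT values here are placeholders, not the driver's:

#include <stdio.h>

#define CURSOR_WIDTH  64  /* placeholder for the hardware cursor size */
#define CURSOR_HEIGHT 64

static int min_int(int a, int b)
{
    return a < b ? a : b;
}

/* A cursor that has gone off the top/left edge keeps position 0 and the
 * overshoot is folded into the hotspot origin, capped so it never exceeds
 * the cursor bitmap. */
static void clamp_cursor(int *x, int *y, int *xorigin, int *yorigin)
{
    *xorigin = 0;
    *yorigin = 0;

    if (*x < 0) {
        *xorigin = min_int(-*x, CURSOR_WIDTH - 1);
        *x = 0;
    }
    if (*y < 0) {
        *yorigin = min_int(-*y, CURSOR_HEIGHT - 1);
        *y = 0;
    }
}

int main(void)
{
    int x = -20, y = 5, xo, yo;

    clamp_cursor(&x, &y, &xo, &yo);
    printf("pos=(%d,%d) hotspot=(%d,%d)\n", x, y, xo, yo);  /* pos=(0,5) hotspot=(20,0) */
    return 0;
}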
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 440e6ecccc40..b51e15725c6e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -32,6 +32,7 @@
32#include <drm/radeon_drm.h> 32#include <drm/radeon_drm.h>
33#include <linux/vgaarb.h> 33#include <linux/vgaarb.h>
34#include <linux/vga_switcheroo.h> 34#include <linux/vga_switcheroo.h>
35#include <linux/efi.h>
35#include "radeon_reg.h" 36#include "radeon_reg.h"
36#include "radeon.h" 37#include "radeon.h"
37#include "atom.h" 38#include "atom.h"
@@ -300,6 +301,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
300 mc->mc_vram_size = mc->aper_size; 301 mc->mc_vram_size = mc->aper_size;
301 } 302 }
302 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 303 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
304 if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
305 mc->real_vram_size = radeon_vram_limit;
303 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", 306 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
304 mc->mc_vram_size >> 20, mc->vram_start, 307 mc->mc_vram_size >> 20, mc->vram_start,
305 mc->vram_end, mc->real_vram_size >> 20); 308 mc->vram_end, mc->real_vram_size >> 20);
@@ -348,6 +351,9 @@ bool radeon_card_posted(struct radeon_device *rdev)
348{ 351{
349 uint32_t reg; 352 uint32_t reg;
350 353
354 if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
355 return false;
356
351 /* first check CRTCs */ 357 /* first check CRTCs */
352 if (ASIC_IS_DCE41(rdev)) { 358 if (ASIC_IS_DCE41(rdev)) {
353 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 359 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1a858944e4f3..6adb3e58affd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -473,8 +473,8 @@ pflip_cleanup:
473 spin_lock_irqsave(&dev->event_lock, flags); 473 spin_lock_irqsave(&dev->event_lock, flags);
474 radeon_crtc->unpin_work = NULL; 474 radeon_crtc->unpin_work = NULL;
475unlock_free: 475unlock_free:
476 drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
477 spin_unlock_irqrestore(&dev->event_lock, flags); 476 spin_unlock_irqrestore(&dev->event_lock, flags);
477 drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
478 radeon_fence_unref(&work->fence); 478 radeon_fence_unref(&work->fence);
479 kfree(work); 479 kfree(work);
480 480
@@ -707,16 +707,21 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
707 radeon_router_select_ddc_port(radeon_connector); 707 radeon_router_select_ddc_port(radeon_connector);
708 708
709 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 709 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
710 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { 710 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
711 radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) {
711 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 712 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
713
712 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || 714 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
713 dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) 715 dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
714 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); 716 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
715 } 717 &dig->dp_i2c_bus->adapter);
716 if (!radeon_connector->ddc_bus) 718 else if (radeon_connector->ddc_bus && !radeon_connector->edid)
717 return -1; 719 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
718 if (!radeon_connector->edid) { 720 &radeon_connector->ddc_bus->adapter);
719 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 721 } else {
722 if (radeon_connector->ddc_bus && !radeon_connector->edid)
723 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
724 &radeon_connector->ddc_bus->adapter);
720 } 725 }
721 726
722 if (!radeon_connector->edid) { 727 if (!radeon_connector->edid) {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index b293487e5aa3..13690f3eb4a4 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1507 switch (mode) { 1507 switch (mode) {
1508 case DRM_MODE_DPMS_ON: 1508 case DRM_MODE_DPMS_ON:
1509 args.ucAction = ATOM_ENABLE; 1509 args.ucAction = ATOM_ENABLE;
1510 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1510 /* workaround for DVOOutputControl on some RS690 systems */
1511 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
1512 u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
1513 WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
1514 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1515 WREG32(RADEON_BIOS_3_SCRATCH, reg);
1516 } else
1517 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1511 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 1518 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
1512 args.ucAction = ATOM_LCD_BLON; 1519 args.ucAction = ATOM_LCD_BLON;
1513 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1520 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
@@ -2323,6 +2330,9 @@ radeon_add_atom_encoder(struct drm_device *dev,
2323 default: 2330 default:
2324 encoder->possible_crtcs = 0x3; 2331 encoder->possible_crtcs = 0x3;
2325 break; 2332 break;
2333 case 4:
2334 encoder->possible_crtcs = 0xf;
2335 break;
2326 case 6: 2336 case 6:
2327 encoder->possible_crtcs = 0x3f; 2337 encoder->possible_crtcs = 0x3f;
2328 break; 2338 break;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d09031c03e26..68820f5f6303 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -479,6 +479,7 @@ extern void radeon_dp_set_link_config(struct drm_connector *connector,
479 struct drm_display_mode *mode); 479 struct drm_display_mode *mode);
480extern void radeon_dp_link_train(struct drm_encoder *encoder, 480extern void radeon_dp_link_train(struct drm_encoder *encoder,
481 struct drm_connector *connector); 481 struct drm_connector *connector);
482extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
482extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); 483extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
483extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); 484extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
484extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); 485extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index dee4a0c1b4b2..602fa3541c45 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -40,10 +40,14 @@ void radeon_test_moves(struct radeon_device *rdev)
40 size = 1024 * 1024; 40 size = 1024 * 1024;
41 41
42 /* Number of tests = 42 /* Number of tests =
43 * (Total GTT - IB pool - writeback page - ring buffer) / test size 43 * (Total GTT - IB pool - writeback page - ring buffers) / test size
44 */ 44 */
45 n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - 45 n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size;
46 rdev->cp.ring_size)) / size; 46 if (rdev->wb.wb_obj)
47 n -= RADEON_GPU_PAGE_SIZE;
48 if (rdev->ih.ring_obj)
49 n -= rdev->ih.ring_size;
50 n /= size;
47 51
48 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); 52 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
49 if (!gtt_obj) { 53 if (!gtt_obj) {
@@ -132,9 +136,15 @@ void radeon_test_moves(struct radeon_device *rdev)
132 gtt_start++, vram_start++) { 136 gtt_start++, vram_start++) {
133 if (*vram_start != gtt_start) { 137 if (*vram_start != gtt_start) {
134 DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " 138 DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
135 "expected 0x%p (GTT map 0x%p-0x%p)\n", 139 "expected 0x%p (GTT/VRAM offset "
136 i, *vram_start, gtt_start, gtt_map, 140 "0x%16llx/0x%16llx)\n",
137 gtt_end); 141 i, *vram_start, gtt_start,
142 (unsigned long long)
143 (gtt_addr - rdev->mc.gtt_start +
144 (void*)gtt_start - gtt_map),
145 (unsigned long long)
146 (vram_addr - rdev->mc.vram_start +
147 (void*)gtt_start - gtt_map));
138 radeon_bo_kunmap(vram_obj); 148 radeon_bo_kunmap(vram_obj);
139 goto out_cleanup; 149 goto out_cleanup;
140 } 150 }
@@ -175,9 +185,15 @@ void radeon_test_moves(struct radeon_device *rdev)
175 gtt_start++, vram_start++) { 185 gtt_start++, vram_start++) {
176 if (*gtt_start != vram_start) { 186 if (*gtt_start != vram_start) {
177 DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " 187 DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
178 "expected 0x%p (VRAM map 0x%p-0x%p)\n", 188 "expected 0x%p (VRAM/GTT offset "
179 i, *gtt_start, vram_start, vram_map, 189 "0x%16llx/0x%16llx)\n",
180 vram_end); 190 i, *gtt_start, vram_start,
191 (unsigned long long)
192 (vram_addr - rdev->mc.vram_start +
193 (void*)vram_start - vram_map),
194 (unsigned long long)
195 (gtt_addr - rdev->mc.gtt_start +
196 (void*)vram_start - vram_map));
181 radeon_bo_kunmap(gtt_obj[i]); 197 radeon_bo_kunmap(gtt_obj[i]);
182 goto out_cleanup; 198 goto out_cleanup;
183 } 199 }
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 60125ddba1e9..0b5468bfaf54 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
277 DRM_ERROR("Trying to move memory with CP turned off.\n"); 277 DRM_ERROR("Trying to move memory with CP turned off.\n");
278 return -EINVAL; 278 return -EINVAL;
279 } 279 }
280 r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); 280
281 BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
282
283 r = radeon_copy(rdev, old_start, new_start,
284 new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
285 fence);
281 /* FIXME: handle copy error */ 286 /* FIXME: handle copy error */
282 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, 287 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
283 evict, no_wait_reserve, no_wait_gpu, new_mem); 288 evict, no_wait_reserve, no_wait_gpu, new_mem);
@@ -450,6 +455,29 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
450 return -EINVAL; 455 return -EINVAL;
451 mem->bus.base = rdev->mc.aper_base; 456 mem->bus.base = rdev->mc.aper_base;
452 mem->bus.is_iomem = true; 457 mem->bus.is_iomem = true;
458#ifdef __alpha__
459 /*
460 * Alpha: use bus.addr to hold the ioremap() return,
461 * so we can modify bus.base below.
462 */
463 if (mem->placement & TTM_PL_FLAG_WC)
464 mem->bus.addr =
465 ioremap_wc(mem->bus.base + mem->bus.offset,
466 mem->bus.size);
467 else
468 mem->bus.addr =
469 ioremap_nocache(mem->bus.base + mem->bus.offset,
470 mem->bus.size);
471
472 /*
473 * Alpha: Use just the bus offset plus
474 * the hose/domain memory base for bus.base.
475 * It then can be used to build PTEs for VRAM
476 * access, as done in ttm_bo_vm_fault().
477 */
478 mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
479 rdev->ddev->hose->dense_mem_base;
480#endif
453 break; 481 break;
454 default: 482 default:
455 return -EINVAL; 483 return -EINVAL;
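radeon_move_blit() above converts TTM's CPU-page count into GPU pages before calling radeon_copy(), guarded by a BUILD_BUG_ON that PAGE_SIZE is a multiple of the 4 KiB GPU page. A minimal sketch of that conversion; PAGE_SIZE here is a local stand-in for an architecture with 64 KiB pages, not the kernel macro:

#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE 4096u
#define PAGE_SIZE            65536u  /* stand-in: e.g. a 64 KiB-page architecture */

/* Compile-time check mirroring the driver's BUILD_BUG_ON(). */
_Static_assert((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) == 0,
               "CPU page size must be a multiple of the GPU page size");

static unsigned long cpu_pages_to_gpu_pages(unsigned long num_cpu_pages)
{
    return num_cpu_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
}

int main(void)
{
    unsigned long num_cpu_pages = 32;  /* what new_mem->num_pages would hold */

    printf("%lu CPU pages -> %lu GPU pages (%lu bytes)\n",
           num_cpu_pages,
           cpu_pages_to_gpu_pages(num_cpu_pages),
           cpu_pages_to_gpu_pages(num_cpu_pages) * (unsigned long)RADEON_GPU_PAGE_SIZE);
    return 0;
}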
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4720d000d440..b13c2eedc321 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
536 return backend_map; 536 return backend_map;
537} 537}
538 538
539static void rv770_program_channel_remap(struct radeon_device *rdev)
540{
541 u32 tcp_chan_steer, mc_shared_chremap, tmp;
542 bool force_no_swizzle;
543
544 switch (rdev->family) {
545 case CHIP_RV770:
546 case CHIP_RV730:
547 force_no_swizzle = false;
548 break;
549 case CHIP_RV710:
550 case CHIP_RV740:
551 default:
552 force_no_swizzle = true;
553 break;
554 }
555
556 tmp = RREG32(MC_SHARED_CHMAP);
557 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
558 case 0:
559 case 1:
560 default:
561 /* default mapping */
562 mc_shared_chremap = 0x00fac688;
563 break;
564 case 2:
565 case 3:
566 if (force_no_swizzle)
567 mc_shared_chremap = 0x00fac688;
568 else
569 mc_shared_chremap = 0x00bbc298;
570 break;
571 }
572
573 if (rdev->family == CHIP_RV740)
574 tcp_chan_steer = 0x00ef2a60;
575 else
576 tcp_chan_steer = 0x00fac688;
577
578 /* RV770 CE has special chremap setup */
579 if (rdev->pdev->device == 0x944e) {
580 tcp_chan_steer = 0x00b08b08;
581 mc_shared_chremap = 0x00b08b08;
582 }
583
584 WREG32(TCP_CHAN_STEER, tcp_chan_steer);
585 WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
586}
587
588static void rv770_gpu_init(struct radeon_device *rdev) 539static void rv770_gpu_init(struct radeon_device *rdev)
589{ 540{
590 int i, j, num_qd_pipes; 541 int i, j, num_qd_pipes;
@@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
785 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 736 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
786 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 737 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
787 738
788 rv770_program_channel_remap(rdev);
789
790 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 739 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
791 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 740 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
792 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 741 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 56619f64b6bf..ef06194c5aa6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -353,8 +353,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
353 353
354 ret = ttm_tt_set_user(bo->ttm, current, 354 ret = ttm_tt_set_user(bo->ttm, current,
355 bo->buffer_start, bo->num_pages); 355 bo->buffer_start, bo->num_pages);
356 if (unlikely(ret != 0)) 356 if (unlikely(ret != 0)) {
357 ttm_tt_destroy(bo->ttm); 357 ttm_tt_destroy(bo->ttm);
358 bo->ttm = NULL;
359 }
358 break; 360 break;
359 default: 361 default:
360 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); 362 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
@@ -390,10 +392,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
390 * Create and bind a ttm if required. 392 * Create and bind a ttm if required.
391 */ 393 */
392 394
393 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) { 395 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
394 ret = ttm_bo_add_ttm(bo, false); 396 if (bo->ttm == NULL) {
395 if (ret) 397 bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
396 goto out_err; 398 ret = ttm_bo_add_ttm(bo, zero);
399 if (ret)
400 goto out_err;
401 }
397 402
398 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); 403 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
399 if (ret) 404 if (ret)
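The ttm_bo_add_ttm() hunk above destroys the freshly created ttm and clears bo->ttm when ttm_tt_set_user() fails, so later code cannot touch a freed object, and ttm_bo_handle_move_mem() then only creates a ttm when the pointer is still NULL. A sketch of that "destroy, then clear the pointer" idiom with hypothetical types standing in for the TTM structures:

#include <stdio.h>
#include <stdlib.h>

struct ttm_tt { int pages; };
struct buffer_object { struct ttm_tt *ttm; };

static struct ttm_tt *ttm_create(int pages)
{
    struct ttm_tt *tt = calloc(1, sizeof(*tt));

    if (tt)
        tt->pages = pages;
    return tt;
}

static int ttm_bind_user(struct ttm_tt *tt)
{
    (void)tt;
    return -1;  /* simulate the failure path handled by the hunk */
}

static int bo_add_ttm(struct buffer_object *bo, int pages)
{
    int ret;

    if (bo->ttm)  /* only create a ttm if none exists yet */
        return 0;

    bo->ttm = ttm_create(pages);
    if (!bo->ttm)
        return -1;

    ret = ttm_bind_user(bo->ttm);
    if (ret) {
        /* Destroy the half-initialized object *and* clear the pointer,
         * so no later path frees or binds it a second time. */
        free(bo->ttm);
        bo->ttm = NULL;
    }
    return ret;
}

int main(void)
{
    struct buffer_object bo = { NULL };

    printf("add_ttm -> %d, bo.ttm=%p\n", bo_add_ttm(&bo, 16), (void *)bo.ttm);
    return 0;
}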
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 77dbf408c0d0..ae3c6f5dd2b7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
635 if (ret) 635 if (ret)
636 return ret; 636 return ret;
637 637
638 ttm_bo_free_old_node(bo);
639 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && 638 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
640 (bo->ttm != NULL)) { 639 (bo->ttm != NULL)) {
641 ttm_tt_unbind(bo->ttm); 640 ttm_tt_unbind(bo->ttm);
642 ttm_tt_destroy(bo->ttm); 641 ttm_tt_destroy(bo->ttm);
643 bo->ttm = NULL; 642 bo->ttm = NULL;
644 } 643 }
644 ttm_bo_free_old_node(bo);
645 } else { 645 } else {
646 /** 646 /**
647 * This should help pipeline ordinary buffer moves. 647 * This should help pipeline ordinary buffer moves.
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 306b15f39c9c..1130a8987125 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -589,6 +589,7 @@ config HID_WACOM_POWER_SUPPLY
589config HID_WIIMOTE 589config HID_WIIMOTE
590 tristate "Nintendo Wii Remote support" 590 tristate "Nintendo Wii Remote support"
591 depends on BT_HIDP 591 depends on BT_HIDP
592 depends on LEDS_CLASS
592 ---help--- 593 ---help---
593 Support for the Nintendo Wii Remote bluetooth device. 594 Support for the Nintendo Wii Remote bluetooth device.
594 595
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index b85744fe8464..18b3bc646bf3 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -444,6 +444,12 @@ static const struct hid_device_id apple_devices[] = {
444 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS), 444 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS),
445 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 445 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
446 APPLE_RDESC_JIS }, 446 APPLE_RDESC_JIS },
447 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI),
448 .driver_data = APPLE_HAS_FN },
449 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
450 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
451 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS),
452 .driver_data = APPLE_HAS_FN },
447 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), 453 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
448 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 454 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
449 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO), 455 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 1a5cf0c9cfca..242353df3dc4 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1340,6 +1340,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
1340 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, 1340 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
1341 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, 1341 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
1342 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, 1342 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
1343 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
1344 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
1345 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
1343 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, 1346 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
1344 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, 1347 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
1345 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1348 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index db63ccf21cc8..7484e1b67249 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -109,6 +109,9 @@
109#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 109#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
110#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 110#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
111#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 111#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
112#define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f
113#define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250
114#define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251
112#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 115#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
113#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a 116#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
114#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b 117#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
@@ -274,6 +277,7 @@
274#define USB_DEVICE_ID_PENPOWER 0x00f4 277#define USB_DEVICE_ID_PENPOWER 0x00f4
275 278
276#define USB_VENDOR_ID_GREENASIA 0x0e8f 279#define USB_VENDOR_ID_GREENASIA 0x0e8f
280#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013
277 281
278#define USB_VENDOR_ID_GRETAGMACBETH 0x0971 282#define USB_VENDOR_ID_GRETAGMACBETH 0x0971
279#define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005 283#define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005
@@ -576,6 +580,9 @@
576#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 580#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
577#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600 581#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
578 582
583#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
584#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
585
579#define USB_VENDOR_ID_SKYCABLE 0x1223 586#define USB_VENDOR_ID_SKYCABLE 0x1223
580#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 587#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
581 588
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 0ec91c18a421..f0fbd7bd239e 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
81#define NO_TOUCHES -1 81#define NO_TOUCHES -1
82#define SINGLE_TOUCH_UP -2 82#define SINGLE_TOUCH_UP -2
83 83
84/* Touch surface information. Dimension is in hundredths of a mm, min and max
85 * are in units. */
86#define MOUSE_DIMENSION_X (float)9056
87#define MOUSE_MIN_X -1100
88#define MOUSE_MAX_X 1258
89#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100))
90#define MOUSE_DIMENSION_Y (float)5152
91#define MOUSE_MIN_Y -1589
92#define MOUSE_MAX_Y 2047
93#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100))
94
95#define TRACKPAD_DIMENSION_X (float)13000
96#define TRACKPAD_MIN_X -2909
97#define TRACKPAD_MAX_X 3167
98#define TRACKPAD_RES_X \
99 ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100))
100#define TRACKPAD_DIMENSION_Y (float)11000
101#define TRACKPAD_MIN_Y -2456
102#define TRACKPAD_MAX_Y 2565
103#define TRACKPAD_RES_Y \
104 ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
105
84/** 106/**
85 * struct magicmouse_sc - Tracks Magic Mouse-specific data. 107 * struct magicmouse_sc - Tracks Magic Mouse-specific data.
86 * @input: Input device through which we report events. 108 * @input: Input device through which we report events.
@@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
406 * inverse of the reported Y. 428 * inverse of the reported Y.
407 */ 429 */
408 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { 430 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
409 input_set_abs_params(input, ABS_MT_POSITION_X, -1100, 431 input_set_abs_params(input, ABS_MT_POSITION_X,
410 1358, 4, 0); 432 MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
411 input_set_abs_params(input, ABS_MT_POSITION_Y, -1589, 433 input_set_abs_params(input, ABS_MT_POSITION_Y,
412 2047, 4, 0); 434 MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
435
436 input_abs_set_res(input, ABS_MT_POSITION_X,
437 MOUSE_RES_X);
438 input_abs_set_res(input, ABS_MT_POSITION_Y,
439 MOUSE_RES_Y);
413 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ 440 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
414 input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0); 441 input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
415 input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0); 442 TRACKPAD_MAX_X, 4, 0);
416 input_set_abs_params(input, ABS_MT_POSITION_X, -2909, 443 input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
417 3167, 4, 0); 444 TRACKPAD_MAX_Y, 4, 0);
418 input_set_abs_params(input, ABS_MT_POSITION_Y, -2456, 445 input_set_abs_params(input, ABS_MT_POSITION_X,
419 2565, 4, 0); 446 TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
447 input_set_abs_params(input, ABS_MT_POSITION_Y,
448 TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
449
450 input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
451 input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
452 input_abs_set_res(input, ABS_MT_POSITION_X,
453 TRACKPAD_RES_X);
454 input_abs_set_res(input, ABS_MT_POSITION_Y,
455 TRACKPAD_RES_Y);
420 } 456 }
421 457
422 input_set_events_per_packet(input, 60); 458 input_set_events_per_packet(input, 60);
@@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev,
501 } 537 }
502 report->size = 6; 538 report->size = 6;
503 539
540 /*
 541 * Some devices respond with 'invalid report id' when the feature
 542 * report switching them into multitouch mode is sent to them.
543 *
544 * This results in -EIO from the _raw low-level transport callback,
545 * but there seems to be no other way of switching the mode.
546 * Thus the super-ugly hacky success check below.
547 */
504 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature), 548 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
505 HID_FEATURE_REPORT); 549 HID_FEATURE_REPORT);
506 if (ret != sizeof(feature)) { 550 if (ret != -EIO && ret != sizeof(feature)) {
507 hid_err(hdev, "unable to request touch data (%d)\n", ret); 551 hid_err(hdev, "unable to request touch data (%d)\n", ret);
508 goto err_stop_hw; 552 goto err_stop_hw;
509 } 553 }
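The hid-magicmouse.c hunk derives the reported resolution from the surface size: the dimension macros are in hundredths of a millimetre, so RES = (MAX - MIN) / (DIMENSION / 100) yields units per millimetre, the convention input_abs_set_res() uses for X/Y axes. The arithmetic reproduced stand-alone with the X-axis constants from the hunk:

#include <stdio.h>

/* Values from the hunk: dimensions in 1/100 mm, min/max in device units. */
#define MOUSE_DIMENSION_X    9056.0f
#define MOUSE_MIN_X          -1100
#define MOUSE_MAX_X          1258
#define TRACKPAD_DIMENSION_X 13000.0f
#define TRACKPAD_MIN_X       -2909
#define TRACKPAD_MAX_X       3167

/* Resolution in units per millimetre: span in units over span in mm. */
static float res_per_mm(int min, int max, float dimension_hundredths_mm)
{
    return (max - min) / (dimension_hundredths_mm / 100.0f);
}

int main(void)
{
    printf("mouse X:    %.1f units/mm\n",
           res_per_mm(MOUSE_MIN_X, MOUSE_MAX_X, MOUSE_DIMENSION_X));
    printf("trackpad X: %.1f units/mm\n",
           res_per_mm(TRACKPAD_MIN_X, TRACKPAD_MAX_X, TRACKPAD_DIMENSION_X));
    return 0;
}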
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 06888323828c..72ca689b6474 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev,
353 if (ret) { 353 if (ret) {
354 hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n", 354 hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
355 ret); 355 ret);
356 /* 356 goto err_battery;
357 * battery attribute is not critical for the tablet, but if it
358 * failed then there is no need to create ac attribute
359 */
360 goto move_on;
361 } 357 }
362 358
363 wdata->ac.properties = wacom_ac_props; 359 wdata->ac.properties = wacom_ac_props;
@@ -371,18 +367,14 @@ static int wacom_probe(struct hid_device *hdev,
371 if (ret) { 367 if (ret) {
372 hid_warn(hdev, 368 hid_warn(hdev,
373 "can't create ac battery attribute, err: %d\n", ret); 369 "can't create ac battery attribute, err: %d\n", ret);
374 /* 370 goto err_ac;
375 * ac attribute is not critical for the tablet, but if it
376 * failed then we don't want to battery attribute to exist
377 */
378 power_supply_unregister(&wdata->battery);
379 } 371 }
380
381move_on:
382#endif 372#endif
383 hidinput = list_entry(hdev->inputs.next, struct hid_input, list); 373 hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
384 input = hidinput->input; 374 input = hidinput->input;
385 375
376 __set_bit(INPUT_PROP_POINTER, input->propbit);
377
386 /* Basics */ 378 /* Basics */
387 input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); 379 input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL);
388 380
@@ -416,6 +408,13 @@ move_on:
416 408
417 return 0; 409 return 0;
418 410
411#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
412err_ac:
413 power_supply_unregister(&wdata->battery);
414err_battery:
415 device_remove_file(&hdev->dev, &dev_attr_speed);
416 hid_hw_stop(hdev);
417#endif
419err_free: 418err_free:
420 kfree(wdata); 419 kfree(wdata);
421 return ret; 420 return ret;
@@ -426,6 +425,7 @@ static void wacom_remove(struct hid_device *hdev)
426#ifdef CONFIG_HID_WACOM_POWER_SUPPLY 425#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
427 struct wacom_data *wdata = hid_get_drvdata(hdev); 426 struct wacom_data *wdata = hid_get_drvdata(hdev);
428#endif 427#endif
428 device_remove_file(&hdev->dev, &dev_attr_speed);
429 hid_hw_stop(hdev); 429 hid_hw_stop(hdev);
430 430
431#ifdef CONFIG_HID_WACOM_POWER_SUPPLY 431#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
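The wacom_probe() hunk replaces the "carry on without a battery" fallback with goto-based unwinding: each error label undoes only the setup that succeeded before the failure, newest first. A compact illustration of the idiom with invented setup and teardown steps, not the driver's real ones:

#include <stdio.h>

static int  setup_sysfs(void)     { puts("sysfs up");     return 0; }
static void teardown_sysfs(void)  { puts("sysfs down"); }
static int  setup_battery(void)   { puts("battery up");   return 0; }
static void teardown_battery(void){ puts("battery down"); }
static int  setup_ac(void)        { puts("ac FAILS");     return -1; }

static int probe(void)
{
    int ret;

    ret = setup_sysfs();
    if (ret)
        goto err_out;
    ret = setup_battery();
    if (ret)
        goto err_battery;
    ret = setup_ac();
    if (ret)
        goto err_ac;

    return 0;

    /* Labels undo only what already succeeded, in reverse order. */
err_ac:
    teardown_battery();
err_battery:
    teardown_sysfs();
err_out:
    return ret;
}

int main(void)
{
    printf("probe -> %d\n", probe());
    return 0;
}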
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c
index a594383ce03d..85a02e5f9fe8 100644
--- a/drivers/hid/hid-wiimote.c
+++ b/drivers/hid/hid-wiimote.c
@@ -10,10 +10,10 @@
10 * any later version. 10 * any later version.
11 */ 11 */
12 12
13#include <linux/atomic.h>
14#include <linux/device.h> 13#include <linux/device.h>
15#include <linux/hid.h> 14#include <linux/hid.h>
16#include <linux/input.h> 15#include <linux/input.h>
16#include <linux/leds.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include "hid-ids.h" 19#include "hid-ids.h"
@@ -33,9 +33,9 @@ struct wiimote_state {
33}; 33};
34 34
35struct wiimote_data { 35struct wiimote_data {
36 atomic_t ready;
37 struct hid_device *hdev; 36 struct hid_device *hdev;
38 struct input_dev *input; 37 struct input_dev *input;
38 struct led_classdev *leds[4];
39 39
40 spinlock_t qlock; 40 spinlock_t qlock;
41 __u8 head; 41 __u8 head;
@@ -53,8 +53,15 @@ struct wiimote_data {
53#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \ 53#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
54 WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4) 54 WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
55 55
56/* return flag for led \num */
57#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
58
56enum wiiproto_reqs { 59enum wiiproto_reqs {
60 WIIPROTO_REQ_NULL = 0x0,
57 WIIPROTO_REQ_LED = 0x11, 61 WIIPROTO_REQ_LED = 0x11,
62 WIIPROTO_REQ_DRM = 0x12,
63 WIIPROTO_REQ_STATUS = 0x20,
64 WIIPROTO_REQ_RETURN = 0x22,
58 WIIPROTO_REQ_DRM_K = 0x30, 65 WIIPROTO_REQ_DRM_K = 0x30,
59}; 66};
60 67
@@ -87,9 +94,6 @@ static __u16 wiiproto_keymap[] = {
87 BTN_MODE, /* WIIPROTO_KEY_HOME */ 94 BTN_MODE, /* WIIPROTO_KEY_HOME */
88}; 95};
89 96
90#define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \
91 dev))
92
93static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer, 97static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
94 size_t count) 98 size_t count)
95{ 99{
@@ -192,66 +196,96 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
192 wiimote_queue(wdata, cmd, sizeof(cmd)); 196 wiimote_queue(wdata, cmd, sizeof(cmd));
193} 197}
194 198
195#define wiifs_led_show_set(num) \ 199/*
196static ssize_t wiifs_led_show_##num(struct device *dev, \ 200 * Check what peripherals of the wiimote are currently
197 struct device_attribute *attr, char *buf) \ 201 * active and select a proper DRM that supports all of
198{ \ 202 * the requested data inputs.
199 struct wiimote_data *wdata = dev_to_wii(dev); \ 203 */
200 unsigned long flags; \ 204static __u8 select_drm(struct wiimote_data *wdata)
201 int state; \ 205{
202 \ 206 return WIIPROTO_REQ_DRM_K;
203 if (!atomic_read(&wdata->ready)) \ 207}
204 return -EBUSY; \ 208
205 \ 209static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
206 spin_lock_irqsave(&wdata->state.lock, flags); \ 210{
207 state = !!(wdata->state.flags & WIIPROTO_FLAG_LED##num); \ 211 __u8 cmd[3];
208 spin_unlock_irqrestore(&wdata->state.lock, flags); \ 212
209 \ 213 if (drm == WIIPROTO_REQ_NULL)
210 return sprintf(buf, "%d\n", state); \ 214 drm = select_drm(wdata);
211} \ 215
212static ssize_t wiifs_led_set_##num(struct device *dev, \ 216 cmd[0] = WIIPROTO_REQ_DRM;
213 struct device_attribute *attr, const char *buf, size_t count) \ 217 cmd[1] = 0;
214{ \ 218 cmd[2] = drm;
215 struct wiimote_data *wdata = dev_to_wii(dev); \ 219
216 int tmp = simple_strtoul(buf, NULL, 10); \ 220 wiimote_queue(wdata, cmd, sizeof(cmd));
217 unsigned long flags; \ 221}
218 __u8 state; \ 222
219 \ 223static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev)
220 if (!atomic_read(&wdata->ready)) \ 224{
221 return -EBUSY; \ 225 struct wiimote_data *wdata;
222 \ 226 struct device *dev = led_dev->dev->parent;
223 spin_lock_irqsave(&wdata->state.lock, flags); \ 227 int i;
224 \ 228 unsigned long flags;
225 state = wdata->state.flags; \ 229 bool value = false;
226 \ 230
227 if (tmp) \ 231 wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
228 wiiproto_req_leds(wdata, state | WIIPROTO_FLAG_LED##num);\ 232
229 else \ 233 for (i = 0; i < 4; ++i) {
230 wiiproto_req_leds(wdata, state & ~WIIPROTO_FLAG_LED##num);\ 234 if (wdata->leds[i] == led_dev) {
231 \ 235 spin_lock_irqsave(&wdata->state.lock, flags);
232 spin_unlock_irqrestore(&wdata->state.lock, flags); \ 236 value = wdata->state.flags & WIIPROTO_FLAG_LED(i + 1);
233 \ 237 spin_unlock_irqrestore(&wdata->state.lock, flags);
234 return count; \ 238 break;
235} \ 239 }
236static DEVICE_ATTR(led##num, S_IRUGO | S_IWUSR, wiifs_led_show_##num, \ 240 }
237 wiifs_led_set_##num) 241
238 242 return value ? LED_FULL : LED_OFF;
239wiifs_led_show_set(1); 243}
240wiifs_led_show_set(2); 244
241wiifs_led_show_set(3); 245static void wiimote_leds_set(struct led_classdev *led_dev,
242wiifs_led_show_set(4); 246 enum led_brightness value)
247{
248 struct wiimote_data *wdata;
249 struct device *dev = led_dev->dev->parent;
250 int i;
251 unsigned long flags;
252 __u8 state, flag;
253
254 wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
255
256 for (i = 0; i < 4; ++i) {
257 if (wdata->leds[i] == led_dev) {
258 flag = WIIPROTO_FLAG_LED(i + 1);
259 spin_lock_irqsave(&wdata->state.lock, flags);
260 state = wdata->state.flags;
261 if (value == LED_OFF)
262 wiiproto_req_leds(wdata, state & ~flag);
263 else
264 wiiproto_req_leds(wdata, state | flag);
265 spin_unlock_irqrestore(&wdata->state.lock, flags);
266 break;
267 }
268 }
269}
243 270
244static int wiimote_input_event(struct input_dev *dev, unsigned int type, 271static int wiimote_input_event(struct input_dev *dev, unsigned int type,
245 unsigned int code, int value) 272 unsigned int code, int value)
246{ 273{
274 return 0;
275}
276
277static int wiimote_input_open(struct input_dev *dev)
278{
247 struct wiimote_data *wdata = input_get_drvdata(dev); 279 struct wiimote_data *wdata = input_get_drvdata(dev);
248 280
249 if (!atomic_read(&wdata->ready)) 281 return hid_hw_open(wdata->hdev);
250 return -EBUSY; 282}
251 /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */
252 smp_rmb();
253 283
254 return 0; 284static void wiimote_input_close(struct input_dev *dev)
285{
286 struct wiimote_data *wdata = input_get_drvdata(dev);
287
288 hid_hw_close(wdata->hdev);
255} 289}
256 290
257static void handler_keys(struct wiimote_data *wdata, const __u8 *payload) 291static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
@@ -281,6 +315,26 @@ static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
281 input_sync(wdata->input); 315 input_sync(wdata->input);
282} 316}
283 317
318static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
319{
320 handler_keys(wdata, payload);
321
322 /* on status reports the drm is reset so we need to resend the drm */
323 wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
324}
325
326static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
327{
328 __u8 err = payload[3];
329 __u8 cmd = payload[2];
330
331 handler_keys(wdata, payload);
332
333 if (err)
334 hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
335 cmd);
336}
337
284struct wiiproto_handler { 338struct wiiproto_handler {
285 __u8 id; 339 __u8 id;
286 size_t size; 340 size_t size;
@@ -288,6 +342,8 @@ struct wiiproto_handler {
288}; 342};
289 343
290static struct wiiproto_handler handlers[] = { 344static struct wiiproto_handler handlers[] = {
345 { .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status },
346 { .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return },
291 { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys }, 347 { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
292 { .id = 0 } 348 { .id = 0 }
293}; 349};
@@ -300,11 +356,6 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
300 int i; 356 int i;
301 unsigned long flags; 357 unsigned long flags;
302 358
303 if (!atomic_read(&wdata->ready))
304 return -EBUSY;
305 /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */
306 smp_rmb();
307
308 if (size < 1) 359 if (size < 1)
309 return -EINVAL; 360 return -EINVAL;
310 361
@@ -321,6 +372,58 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
321 return 0; 372 return 0;
322} 373}
323 374
375static void wiimote_leds_destroy(struct wiimote_data *wdata)
376{
377 int i;
378 struct led_classdev *led;
379
380 for (i = 0; i < 4; ++i) {
381 if (wdata->leds[i]) {
382 led = wdata->leds[i];
383 wdata->leds[i] = NULL;
384 led_classdev_unregister(led);
385 kfree(led);
386 }
387 }
388}
389
390static int wiimote_leds_create(struct wiimote_data *wdata)
391{
392 int i, ret;
393 struct device *dev = &wdata->hdev->dev;
394 size_t namesz = strlen(dev_name(dev)) + 9;
395 struct led_classdev *led;
396 char *name;
397
398 for (i = 0; i < 4; ++i) {
399 led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL);
400 if (!led) {
401 ret = -ENOMEM;
402 goto err;
403 }
404 name = (void*)&led[1];
405 snprintf(name, namesz, "%s:blue:p%d", dev_name(dev), i);
406 led->name = name;
407 led->brightness = 0;
408 led->max_brightness = 1;
409 led->brightness_get = wiimote_leds_get;
410 led->brightness_set = wiimote_leds_set;
411
412 ret = led_classdev_register(dev, led);
413 if (ret) {
414 kfree(led);
415 goto err;
416 }
417 wdata->leds[i] = led;
418 }
419
420 return 0;
421
422err:
423 wiimote_leds_destroy(wdata);
424 return ret;
425}
426
324static struct wiimote_data *wiimote_create(struct hid_device *hdev) 427static struct wiimote_data *wiimote_create(struct hid_device *hdev)
325{ 428{
326 struct wiimote_data *wdata; 429 struct wiimote_data *wdata;
@@ -341,6 +444,8 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
341 444
342 input_set_drvdata(wdata->input, wdata); 445 input_set_drvdata(wdata->input, wdata);
343 wdata->input->event = wiimote_input_event; 446 wdata->input->event = wiimote_input_event;
447 wdata->input->open = wiimote_input_open;
448 wdata->input->close = wiimote_input_close;
344 wdata->input->dev.parent = &wdata->hdev->dev; 449 wdata->input->dev.parent = &wdata->hdev->dev;
345 wdata->input->id.bustype = wdata->hdev->bus; 450 wdata->input->id.bustype = wdata->hdev->bus;
346 wdata->input->id.vendor = wdata->hdev->vendor; 451 wdata->input->id.vendor = wdata->hdev->vendor;
@@ -362,6 +467,12 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
362 467
363static void wiimote_destroy(struct wiimote_data *wdata) 468static void wiimote_destroy(struct wiimote_data *wdata)
364{ 469{
470 wiimote_leds_destroy(wdata);
471
472 input_unregister_device(wdata->input);
473 cancel_work_sync(&wdata->worker);
474 hid_hw_stop(wdata->hdev);
475
365 kfree(wdata); 476 kfree(wdata);
366} 477}
367 478
@@ -377,19 +488,6 @@ static int wiimote_hid_probe(struct hid_device *hdev,
377 return -ENOMEM; 488 return -ENOMEM;
378 } 489 }
379 490
380 ret = device_create_file(&hdev->dev, &dev_attr_led1);
381 if (ret)
382 goto err;
383 ret = device_create_file(&hdev->dev, &dev_attr_led2);
384 if (ret)
385 goto err;
386 ret = device_create_file(&hdev->dev, &dev_attr_led3);
387 if (ret)
388 goto err;
389 ret = device_create_file(&hdev->dev, &dev_attr_led4);
390 if (ret)
391 goto err;
392
393 ret = hid_parse(hdev); 491 ret = hid_parse(hdev);
394 if (ret) { 492 if (ret) {
395 hid_err(hdev, "HID parse failed\n"); 493 hid_err(hdev, "HID parse failed\n");
@@ -408,9 +506,10 @@ static int wiimote_hid_probe(struct hid_device *hdev,
408 goto err_stop; 506 goto err_stop;
409 } 507 }
410 508
411 /* smp_wmb: Write wdata->xy first before wdata->ready is set to 1 */ 509 ret = wiimote_leds_create(wdata);
412 smp_wmb(); 510 if (ret)
413 atomic_set(&wdata->ready, 1); 511 goto err_free;
512
414 hid_info(hdev, "New device registered\n"); 513 hid_info(hdev, "New device registered\n");
415 514
416 /* by default set led1 after device initialization */ 515 /* by default set led1 after device initialization */
@@ -420,15 +519,15 @@ static int wiimote_hid_probe(struct hid_device *hdev,
420 519
421 return 0; 520 return 0;
422 521
522err_free:
523 wiimote_destroy(wdata);
524 return ret;
525
423err_stop: 526err_stop:
424 hid_hw_stop(hdev); 527 hid_hw_stop(hdev);
425err: 528err:
426 input_free_device(wdata->input); 529 input_free_device(wdata->input);
427 device_remove_file(&hdev->dev, &dev_attr_led1); 530 kfree(wdata);
428 device_remove_file(&hdev->dev, &dev_attr_led2);
429 device_remove_file(&hdev->dev, &dev_attr_led3);
430 device_remove_file(&hdev->dev, &dev_attr_led4);
431 wiimote_destroy(wdata);
432 return ret; 531 return ret;
433} 532}
434 533
@@ -437,16 +536,6 @@ static void wiimote_hid_remove(struct hid_device *hdev)
437 struct wiimote_data *wdata = hid_get_drvdata(hdev); 536 struct wiimote_data *wdata = hid_get_drvdata(hdev);
438 537
439 hid_info(hdev, "Device removed\n"); 538 hid_info(hdev, "Device removed\n");
440
441 device_remove_file(&hdev->dev, &dev_attr_led1);
442 device_remove_file(&hdev->dev, &dev_attr_led2);
443 device_remove_file(&hdev->dev, &dev_attr_led3);
444 device_remove_file(&hdev->dev, &dev_attr_led4);
445
446 hid_hw_stop(hdev);
447 input_unregister_device(wdata->input);
448
449 cancel_work_sync(&wdata->worker);
450 wiimote_destroy(wdata); 539 wiimote_destroy(wdata);
451} 540}
452 541
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 621959d5cc42..3146fdcda272 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
47 { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, 47 { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
48 48
49 { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, 49 { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
50 { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
50 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, 51 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
51 { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, 52 { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
52 { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT }, 53 { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
@@ -89,6 +90,7 @@ static const struct hid_blacklist {
89 90
90 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT }, 91 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
91 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, 92 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
93 { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
92 { 0, 0 } 94 { 0, 0 }
93}; 95};
94 96
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 59d83e83da7f..932383786642 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,17 +36,25 @@
36#include <linux/cpu.h> 36#include <linux/cpu.h>
37#include <linux/pci.h> 37#include <linux/pci.h>
38#include <linux/smp.h> 38#include <linux/smp.h>
39#include <linux/moduleparam.h>
39#include <asm/msr.h> 40#include <asm/msr.h>
40#include <asm/processor.h> 41#include <asm/processor.h>
41 42
42#define DRVNAME "coretemp" 43#define DRVNAME "coretemp"
43 44
45/*
46 * force_tjmax only matters when TjMax can't be read from the CPU itself.
47 * When set, it replaces the driver's suboptimal heuristic.
48 */
49static int force_tjmax;
50module_param_named(tjmax, force_tjmax, int, 0444);
51MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
52
44#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ 53#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
45#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ 54#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */
46#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ 55#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
47#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ 56#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
48#define MAX_THRESH_ATTRS 3 /* Maximum no of Threshold attrs */ 57#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
49#define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
50#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) 58#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
51 59
52#ifdef CONFIG_SMP 60#ifdef CONFIG_SMP
@@ -69,8 +77,6 @@
69 * This value is passed as "id" field to rdmsr/wrmsr functions. 77 * This value is passed as "id" field to rdmsr/wrmsr functions.
70 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS, 78 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
71 * from where the temperature values should be read. 79 * from where the temperature values should be read.
72 * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
73 * from where the thresholds are read.
74 * @attr_size: Total number of per-core attrs displayed in the sysfs. 80 * @attr_size: Total number of per-core attrs displayed in the sysfs.

75 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data. 81 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
76 * Otherwise, temp_data holds coretemp data. 82 * Otherwise, temp_data holds coretemp data.
@@ -79,13 +85,11 @@
79struct temp_data { 85struct temp_data {
80 int temp; 86 int temp;
81 int ttarget; 87 int ttarget;
82 int tmin;
83 int tjmax; 88 int tjmax;
84 unsigned long last_updated; 89 unsigned long last_updated;
85 unsigned int cpu; 90 unsigned int cpu;
86 u32 cpu_core_id; 91 u32 cpu_core_id;
87 u32 status_reg; 92 u32 status_reg;
88 u32 intrpt_reg;
89 int attr_size; 93 int attr_size;
90 bool is_pkg_data; 94 bool is_pkg_data;
91 bool valid; 95 bool valid;
@@ -143,19 +147,6 @@ static ssize_t show_crit_alarm(struct device *dev,
143 return sprintf(buf, "%d\n", (eax >> 5) & 1); 147 return sprintf(buf, "%d\n", (eax >> 5) & 1);
144} 148}
145 149
146static ssize_t show_max_alarm(struct device *dev,
147 struct device_attribute *devattr, char *buf)
148{
149 u32 eax, edx;
150 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
151 struct platform_data *pdata = dev_get_drvdata(dev);
152 struct temp_data *tdata = pdata->core_data[attr->index];
153
154 rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
155
156 return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1));
157}
158
159static ssize_t show_tjmax(struct device *dev, 150static ssize_t show_tjmax(struct device *dev,
160 struct device_attribute *devattr, char *buf) 151 struct device_attribute *devattr, char *buf)
161{ 152{
@@ -174,83 +165,6 @@ static ssize_t show_ttarget(struct device *dev,
174 return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget); 165 return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
175} 166}
176 167
177static ssize_t store_ttarget(struct device *dev,
178 struct device_attribute *devattr,
179 const char *buf, size_t count)
180{
181 struct platform_data *pdata = dev_get_drvdata(dev);
182 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
183 struct temp_data *tdata = pdata->core_data[attr->index];
184 u32 eax, edx;
185 unsigned long val;
186 int diff;
187
188 if (strict_strtoul(buf, 10, &val))
189 return -EINVAL;
190
191 /*
192 * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms
193 * of milli degree celsius. Hence don't accept val > (127 * 1000)
194 */
195 if (val > tdata->tjmax || val > 127000)
196 return -EINVAL;
197
198 diff = (tdata->tjmax - val) / 1000;
199
200 mutex_lock(&tdata->update_lock);
201 rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
202 eax = (eax & ~THERM_MASK_THRESHOLD1) |
203 (diff << THERM_SHIFT_THRESHOLD1);
204 wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
205 tdata->ttarget = val;
206 mutex_unlock(&tdata->update_lock);
207
208 return count;
209}
210
211static ssize_t show_tmin(struct device *dev,
212 struct device_attribute *devattr, char *buf)
213{
214 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
215 struct platform_data *pdata = dev_get_drvdata(dev);
216
217 return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin);
218}
219
220static ssize_t store_tmin(struct device *dev,
221 struct device_attribute *devattr,
222 const char *buf, size_t count)
223{
224 struct platform_data *pdata = dev_get_drvdata(dev);
225 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
226 struct temp_data *tdata = pdata->core_data[attr->index];
227 u32 eax, edx;
228 unsigned long val;
229 int diff;
230
231 if (strict_strtoul(buf, 10, &val))
232 return -EINVAL;
233
234 /*
235 * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms
236 * of milli degree celsius. Hence don't accept val > (127 * 1000)
237 */
238 if (val > tdata->tjmax || val > 127000)
239 return -EINVAL;
240
241 diff = (tdata->tjmax - val) / 1000;
242
243 mutex_lock(&tdata->update_lock);
244 rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
245 eax = (eax & ~THERM_MASK_THRESHOLD0) |
246 (diff << THERM_SHIFT_THRESHOLD0);
247 wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
248 tdata->tmin = val;
249 mutex_unlock(&tdata->update_lock);
250
251 return count;
252}
253
254static ssize_t show_temp(struct device *dev, 168static ssize_t show_temp(struct device *dev,
255 struct device_attribute *devattr, char *buf) 169 struct device_attribute *devattr, char *buf)
256{ 170{
@@ -374,7 +288,6 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
374 288
375static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) 289static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
376{ 290{
377 /* The 100C is default for both mobile and non mobile CPUs */
378 int err; 291 int err;
379 u32 eax, edx; 292 u32 eax, edx;
380 u32 val; 293 u32 val;
@@ -385,7 +298,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
385 */ 298 */
386 err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); 299 err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
387 if (err) { 300 if (err) {
388 dev_warn(dev, "Unable to read TjMax from CPU.\n"); 301 if (c->x86_model > 0xe && c->x86_model != 0x1c)
302 dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
389 } else { 303 } else {
390 val = (eax >> 16) & 0xff; 304 val = (eax >> 16) & 0xff;
391 /* 305 /*
@@ -393,11 +307,17 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
393 * will be used 307 * will be used
394 */ 308 */
395 if (val) { 309 if (val) {
396 dev_info(dev, "TjMax is %d C.\n", val); 310 dev_dbg(dev, "TjMax is %d degrees C\n", val);
397 return val * 1000; 311 return val * 1000;
398 } 312 }
399 } 313 }
400 314
315 if (force_tjmax) {
316 dev_notice(dev, "TjMax forced to %d degrees C by user\n",
317 force_tjmax);
318 return force_tjmax * 1000;
319 }
320
401 /* 321 /*
402 * An assumption is made for early CPUs and unreadable MSR. 322 * An assumption is made for early CPUs and unreadable MSR.
403 * NOTE: the calculated value may not be correct. 323 * NOTE: the calculated value may not be correct.
@@ -414,21 +334,6 @@ static void __devinit get_ucode_rev_on_cpu(void *edx)
414 rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx); 334 rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
415} 335}
416 336
417static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
418{
419 int err;
420 u32 eax, edx, val;
421
422 err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
423 if (!err) {
424 val = (eax >> 16) & 0xff;
425 if (val)
426 return val * 1000;
427 }
428 dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
429 return 100000; /* Default TjMax: 100 degree celsius */
430}
431
432static int create_name_attr(struct platform_data *pdata, struct device *dev) 337static int create_name_attr(struct platform_data *pdata, struct device *dev)
433{ 338{
434 sysfs_attr_init(&pdata->name_attr.attr); 339 sysfs_attr_init(&pdata->name_attr.attr);
@@ -442,19 +347,14 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
442 int attr_no) 347 int attr_no)
443{ 348{
444 int err, i; 349 int err, i;
445 static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev, 350 static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
446 struct device_attribute *devattr, char *buf) = { 351 struct device_attribute *devattr, char *buf) = {
447 show_label, show_crit_alarm, show_temp, show_tjmax, 352 show_label, show_crit_alarm, show_temp, show_tjmax,
448 show_max_alarm, show_ttarget, show_tmin }; 353 show_ttarget };
449 static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev, 354 static const char *const names[TOTAL_ATTRS] = {
450 struct device_attribute *devattr, const char *buf,
451 size_t count) = { NULL, NULL, NULL, NULL, NULL,
452 store_ttarget, store_tmin };
453 static const char *names[TOTAL_ATTRS] = {
454 "temp%d_label", "temp%d_crit_alarm", 355 "temp%d_label", "temp%d_crit_alarm",
455 "temp%d_input", "temp%d_crit", 356 "temp%d_input", "temp%d_crit",
456 "temp%d_max_alarm", "temp%d_max", 357 "temp%d_max" };
457 "temp%d_max_hyst" };
458 358
459 for (i = 0; i < tdata->attr_size; i++) { 359 for (i = 0; i < tdata->attr_size; i++) {
460 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], 360 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
@@ -462,10 +362,6 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
462 sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); 362 sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
463 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; 363 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
464 tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; 364 tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
465 if (rw_ptr[i]) {
466 tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
467 tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
468 }
469 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; 365 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
470 tdata->sd_attrs[i].index = attr_no; 366 tdata->sd_attrs[i].index = attr_no;
471 err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr); 367 err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
@@ -481,9 +377,9 @@ exit_free:
481} 377}
482 378
483 379
484static int __devinit chk_ucode_version(struct platform_device *pdev) 380static int __cpuinit chk_ucode_version(unsigned int cpu)
485{ 381{
486 struct cpuinfo_x86 *c = &cpu_data(pdev->id); 382 struct cpuinfo_x86 *c = &cpu_data(cpu);
487 int err; 383 int err;
488 u32 edx; 384 u32 edx;
489 385
@@ -494,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev)
494 */ 390 */
495 if (c->x86_model == 0xe && c->x86_mask < 0xc) { 391 if (c->x86_model == 0xe && c->x86_mask < 0xc) {
496 /* check for microcode update */ 392 /* check for microcode update */
497 err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu, 393 err = smp_call_function_single(cpu, get_ucode_rev_on_cpu,
498 &edx, 1); 394 &edx, 1);
499 if (err) { 395 if (err) {
500 dev_err(&pdev->dev, 396 pr_err("Cannot determine microcode revision of "
501 "Cannot determine microcode revision of " 397 "CPU#%u (%d)!\n", cpu, err);
502 "CPU#%u (%d)!\n", pdev->id, err);
503 return -ENODEV; 398 return -ENODEV;
504 } else if (edx < 0x39) { 399 } else if (edx < 0x39) {
505 dev_err(&pdev->dev, 400 pr_err("Errata AE18 not fixed, update BIOS or "
506 "Errata AE18 not fixed, update BIOS or " 401 "microcode of the CPU!\n");
507 "microcode of the CPU!\n");
508 return -ENODEV; 402 return -ENODEV;
509 } 403 }
510 } 404 }
@@ -538,8 +432,6 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
538 432
539 tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS : 433 tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
540 MSR_IA32_THERM_STATUS; 434 MSR_IA32_THERM_STATUS;
541 tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
542 MSR_IA32_THERM_INTERRUPT;
543 tdata->is_pkg_data = pkg_flag; 435 tdata->is_pkg_data = pkg_flag;
544 tdata->cpu = cpu; 436 tdata->cpu = cpu;
545 tdata->cpu_core_id = TO_CORE_ID(cpu); 437 tdata->cpu_core_id = TO_CORE_ID(cpu);
@@ -548,11 +440,11 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
548 return tdata; 440 return tdata;
549} 441}
550 442
551static int create_core_data(struct platform_data *pdata, 443static int create_core_data(struct platform_device *pdev,
552 struct platform_device *pdev,
553 unsigned int cpu, int pkg_flag) 444 unsigned int cpu, int pkg_flag)
554{ 445{
555 struct temp_data *tdata; 446 struct temp_data *tdata;
447 struct platform_data *pdata = platform_get_drvdata(pdev);
556 struct cpuinfo_x86 *c = &cpu_data(cpu); 448 struct cpuinfo_x86 *c = &cpu_data(cpu);
557 u32 eax, edx; 449 u32 eax, edx;
558 int err, attr_no; 450 int err, attr_no;
@@ -588,20 +480,21 @@ static int create_core_data(struct platform_data *pdata,
588 goto exit_free; 480 goto exit_free;
589 481
590 /* We can access status register. Get Critical Temperature */ 482 /* We can access status register. Get Critical Temperature */
591 if (pkg_flag) 483 tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
592 tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev);
593 else
594 tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
595 484
596 /* 485 /*
597 * Test if we can access the intrpt register. If so, increase the 486 * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET.
598 * 'size' enough to have ttarget/tmin/max_alarm interfaces. 487 * The target temperature is available on older CPUs but not in this
599 * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT 488 * register. Atoms don't have the register at all.
600 */ 489 */
601 err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx); 490 if (c->x86_model > 0xe && c->x86_model != 0x1c) {
602 if (!err) { 491 err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET,
603 tdata->attr_size += MAX_THRESH_ATTRS; 492 &eax, &edx);
604 tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000; 493 if (!err) {
494 tdata->ttarget
495 = tdata->tjmax - ((eax >> 8) & 0xff) * 1000;
496 tdata->attr_size++;
497 }
605 } 498 }
606 499
607 pdata->core_data[attr_no] = tdata; 500 pdata->core_data[attr_no] = tdata;
@@ -613,22 +506,20 @@ static int create_core_data(struct platform_data *pdata,
613 506
614 return 0; 507 return 0;
615exit_free: 508exit_free:
509 pdata->core_data[attr_no] = NULL;
616 kfree(tdata); 510 kfree(tdata);
617 return err; 511 return err;
618} 512}
619 513
620static void coretemp_add_core(unsigned int cpu, int pkg_flag) 514static void coretemp_add_core(unsigned int cpu, int pkg_flag)
621{ 515{
622 struct platform_data *pdata;
623 struct platform_device *pdev = coretemp_get_pdev(cpu); 516 struct platform_device *pdev = coretemp_get_pdev(cpu);
624 int err; 517 int err;
625 518
626 if (!pdev) 519 if (!pdev)
627 return; 520 return;
628 521
629 pdata = platform_get_drvdata(pdev); 522 err = create_core_data(pdev, cpu, pkg_flag);
630
631 err = create_core_data(pdata, pdev, cpu, pkg_flag);
632 if (err) 523 if (err)
633 dev_err(&pdev->dev, "Adding Core %u failed\n", cpu); 524 dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
634} 525}
@@ -652,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
652 struct platform_data *pdata; 543 struct platform_data *pdata;
653 int err; 544 int err;
654 545
655 /* Check the microcode version of the CPU */
656 err = chk_ucode_version(pdev);
657 if (err)
658 return err;
659
660 /* Initialize the per-package data structures */ 546 /* Initialize the per-package data structures */
661 pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL); 547 pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
662 if (!pdata) 548 if (!pdata)
@@ -666,7 +552,7 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
666 if (err) 552 if (err)
667 goto exit_free; 553 goto exit_free;
668 554
669 pdata->phys_proc_id = TO_PHYS_ID(pdev->id); 555 pdata->phys_proc_id = pdev->id;
670 platform_set_drvdata(pdev, pdata); 556 platform_set_drvdata(pdev, pdata);
671 557
672 pdata->hwmon_dev = hwmon_device_register(&pdev->dev); 558 pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
@@ -718,7 +604,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
718 604
719 mutex_lock(&pdev_list_mutex); 605 mutex_lock(&pdev_list_mutex);
720 606
721 pdev = platform_device_alloc(DRVNAME, cpu); 607 pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu));
722 if (!pdev) { 608 if (!pdev) {
723 err = -ENOMEM; 609 err = -ENOMEM;
724 pr_err("Device allocation failed\n"); 610 pr_err("Device allocation failed\n");
@@ -738,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
738 } 624 }
739 625
740 pdev_entry->pdev = pdev; 626 pdev_entry->pdev = pdev;
741 pdev_entry->phys_proc_id = TO_PHYS_ID(cpu); 627 pdev_entry->phys_proc_id = pdev->id;
742 628
743 list_add_tail(&pdev_entry->list, &pdev_list); 629 list_add_tail(&pdev_entry->list, &pdev_list);
744 mutex_unlock(&pdev_list_mutex); 630 mutex_unlock(&pdev_list_mutex);
@@ -799,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu)
799 return; 685 return;
800 686
801 if (!pdev) { 687 if (!pdev) {
688 /* Check the microcode version of the CPU */
689 if (chk_ucode_version(cpu))
690 return;
691
802 /* 692 /*
803 * Alright, we have DTS support. 693 * Alright, we have DTS support.
804 * We are bringing the _first_ core in this pkg 694 * We are bringing the _first_ core in this pkg
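The coretemp changes above read TjMax from bits 23:16 of MSR_IA32_TEMPERATURE_TARGET and, on models that have it, derive the target temperature from bits 8:15 as an offset below TjMax, both reported in millidegrees. A worked example of that decoding on an invented raw register value (not read from hardware):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t eax = 0x00640a00;      /* hypothetical: TjMax = 100, offset = 10 */
        int tjmax   = ((eax >> 16) & 0xff) * 1000;              /* millidegrees C */
        int ttarget = tjmax - ((eax >> 8) & 0xff) * 1000;

        printf("tjmax=%d mC ttarget=%d mC\n", tjmax, ttarget);
        return 0;
}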
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 257957c69d92..4f7c3fc40a89 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -72,7 +72,7 @@ struct ds620_data {
72 char valid; /* !=0 if following fields are valid */ 72 char valid; /* !=0 if following fields are valid */
73 unsigned long last_updated; /* In jiffies */ 73 unsigned long last_updated; /* In jiffies */
74 74
75 u16 temp[3]; /* Register values, word */ 75 s16 temp[3]; /* Register values, word */
76}; 76};
77 77
78/* 78/*
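The ds620 change above matters because the chip reports temperature as a two's-complement word: kept in a u16, a below-zero reading turns into a large positive number. A small demonstration with a made-up register value and a simplified conversion (upper byte as whole degrees; the driver's exact scaling differs):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t raw = 0xfb00;                  /* hypothetical reading: -5 degrees C */
        uint16_t as_unsigned = raw;
        int16_t  as_signed   = (int16_t)raw;

        printf("unsigned: %d C\n", as_unsigned / 256);  /* 251 C, nonsense */
        printf("signed:   %d C\n", as_signed / 256);    /* -5 C */
        return 0;
}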
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index c4c40be0edbf..d22f241b6a67 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -114,7 +114,6 @@ struct i5k_amb_data {
114 void __iomem *amb_mmio; 114 void __iomem *amb_mmio;
115 struct i5k_device_attribute *attrs; 115 struct i5k_device_attribute *attrs;
116 unsigned int num_attrs; 116 unsigned int num_attrs;
117 unsigned long chipset_id;
118}; 117};
119 118
120static ssize_t show_name(struct device *dev, struct device_attribute *devattr, 119static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
@@ -444,8 +443,6 @@ static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data,
444 goto out; 443 goto out;
445 } 444 }
446 445
447 data->chipset_id = devid;
448
449 res = 0; 446 res = 0;
450out: 447out:
451 pci_dev_put(pcidev); 448 pci_dev_put(pcidev);
@@ -478,23 +475,13 @@ out:
478 return res; 475 return res;
479} 476}
480 477
481static unsigned long i5k_channel_pci_id(struct i5k_amb_data *data, 478static struct {
482 unsigned long channel) 479 unsigned long err;
483{ 480 unsigned long fbd0;
484 switch (data->chipset_id) { 481} chipset_ids[] __devinitdata = {
485 case PCI_DEVICE_ID_INTEL_5000_ERR: 482 { PCI_DEVICE_ID_INTEL_5000_ERR, PCI_DEVICE_ID_INTEL_5000_FBD0 },
486 return PCI_DEVICE_ID_INTEL_5000_FBD0 + channel; 483 { PCI_DEVICE_ID_INTEL_5400_ERR, PCI_DEVICE_ID_INTEL_5400_FBD0 },
487 case PCI_DEVICE_ID_INTEL_5400_ERR: 484 { 0, 0 }
488 return PCI_DEVICE_ID_INTEL_5400_FBD0 + channel;
489 default:
490 BUG();
491 }
492}
493
494static unsigned long chipset_ids[] = {
495 PCI_DEVICE_ID_INTEL_5000_ERR,
496 PCI_DEVICE_ID_INTEL_5400_ERR,
497 0
498}; 485};
499 486
500#ifdef MODULE 487#ifdef MODULE
@@ -510,8 +497,7 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev)
510{ 497{
511 struct i5k_amb_data *data; 498 struct i5k_amb_data *data;
512 struct resource *reso; 499 struct resource *reso;
513 int i; 500 int i, res;
514 int res = -ENODEV;
515 501
516 data = kzalloc(sizeof(*data), GFP_KERNEL); 502 data = kzalloc(sizeof(*data), GFP_KERNEL);
517 if (!data) 503 if (!data)
@@ -520,22 +506,22 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev)
520 /* Figure out where the AMB registers live */ 506 /* Figure out where the AMB registers live */
521 i = 0; 507 i = 0;
522 do { 508 do {
523 res = i5k_find_amb_registers(data, chipset_ids[i]); 509 res = i5k_find_amb_registers(data, chipset_ids[i].err);
510 if (res == 0)
511 break;
524 i++; 512 i++;
525 } while (res && chipset_ids[i]); 513 } while (chipset_ids[i].err);
526 514
527 if (res) 515 if (res)
528 goto err; 516 goto err;
529 517
530 /* Copy the DIMM presence map for the first two channels */ 518 /* Copy the DIMM presence map for the first two channels */
531 res = i5k_channel_probe(&data->amb_present[0], 519 res = i5k_channel_probe(&data->amb_present[0], chipset_ids[i].fbd0);
532 i5k_channel_pci_id(data, 0));
533 if (res) 520 if (res)
534 goto err; 521 goto err;
535 522
536 /* Copy the DIMM presence map for the optional second two channels */ 523 /* Copy the DIMM presence map for the optional second two channels */
537 i5k_channel_probe(&data->amb_present[2], 524 i5k_channel_probe(&data->amb_present[2], chipset_ids[i].fbd0 + 1);
538 i5k_channel_pci_id(data, 1));
539 525
540 /* Set up resource regions */ 526 /* Set up resource regions */
541 reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME); 527 reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME);
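The i5k_amb rework above replaces the switch on a stored chipset_id with a table that pairs each error-function device ID with its first FBD channel ID, terminated by a zero entry, so the probe loop both identifies the chipset and knows the channel base in one pass. The shape of that lookup in standalone form, with placeholder numeric IDs rather than the real PCI device IDs:

#include <stdio.h>

struct chip_pair {
        unsigned long err;      /* ID of the error-reporting function */
        unsigned long fbd0;     /* matching ID of the first FBD channel */
};

static const struct chip_pair chipset_ids[] = {
        { 0x1001, 0x1005 },     /* placeholder pair */
        { 0x2001, 0x2005 },     /* placeholder pair */
        { 0, 0 }                /* sentinel */
};

static const struct chip_pair *find_chip(unsigned long err_id)
{
        const struct chip_pair *p;

        for (p = chipset_ids; p->err; p++)
                if (p->err == err_id)
                        return p;
        return NULL;
}

int main(void)
{
        const struct chip_pair *p = find_chip(0x2001);

        if (p)
                printf("fbd0 base id: 0x%lx\n", p->fbd0);
        return 0;
}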
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 1a409c5bc9bc..c316294c48b4 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -432,13 +432,15 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
432 aem_send_message(ipmi); 432 aem_send_message(ipmi);
433 433
434 res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT); 434 res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT);
435 if (!res) 435 if (!res) {
436 return -ETIMEDOUT; 436 res = -ETIMEDOUT;
437 goto out;
438 }
437 439
438 if (ipmi->rx_result || ipmi->rx_msg_len != rs_size || 440 if (ipmi->rx_result || ipmi->rx_msg_len != rs_size ||
439 memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) { 441 memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) {
440 kfree(rs_resp); 442 res = -ENOENT;
441 return -ENOENT; 443 goto out;
442 } 444 }
443 445
444 switch (size) { 446 switch (size) {
@@ -463,8 +465,11 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
463 break; 465 break;
464 } 466 }
465 } 467 }
468 res = 0;
466 469
467 return 0; 470out:
471 kfree(rs_resp);
472 return res;
468} 473}
469 474
470/* Update AEM energy registers */ 475/* Update AEM energy registers */
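The ibmaem change above routes the timeout and the malformed-response cases through one out: label so rs_resp is freed on every path; previously the timeout return leaked it. The same single-exit shape in standalone form, with illustrative names:

#include <stdlib.h>
#include <errno.h>

static int wait_for_reply(void)         /* stand-in: 0 means timed out */
{
        return 0;
}

static int read_sensor(void)
{
        char *rs_resp;
        int res;

        rs_resp = malloc(64);
        if (!rs_resp)
                return -ENOMEM;

        if (!wait_for_reply()) {
                res = -ETIMEDOUT;       /* this path used to leak rs_resp */
                goto out;
        }

        /* ... parse rs_resp ... */
        res = 0;
out:
        free(rs_resp);
        return res;
}

int main(void)
{
        return read_sensor() == -ETIMEDOUT ? 0 : 1;
}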
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index d94a24fdf4ba..dd2d7b9620c2 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)
124 124
125static inline int ADC_TO_CURR(int adc, int gain) 125static inline int ADC_TO_CURR(int adc, int gain)
126{ 126{
127 return adc * 1400000 / gain * 255; 127 return adc * 1400000 / (gain * 255);
128} 128}
129 129
130/* 130/*
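The max16065 fix above is pure operator precedence: adc * 1400000 / gain * 255 divides by gain and then multiplies by 255, instead of dividing by (gain * 255). A numeric illustration with made-up adc/gain values:

#include <stdio.h>

int main(void)
{
        int adc = 10, gain = 8;

        int wrong = adc * 1400000 / gain * 255;         /* (adc*1400000/gain)*255 */
        int right = adc * 1400000 / (gain * 255);

        printf("wrong=%d right=%d\n", wrong, right);    /* 446250000 vs 6862 */
        return 0;
}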
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index d7926f4336b5..eab11615dced 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -211,8 +211,7 @@ static int lookup_comp(struct ntc_data *data,
211 if (data->comp[mid].ohm <= ohm) { 211 if (data->comp[mid].ohm <= ohm) {
212 *i_low = mid; 212 *i_low = mid;
213 *i_high = mid - 1; 213 *i_high = mid - 1;
214 } 214 } else {
215 if (data->comp[mid].ohm > ohm) {
216 *i_low = mid + 1; 215 *i_low = mid + 1;
217 *i_high = mid; 216 *i_high = mid;
218 } 217 }
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index d4bc114572de..ac254fba551b 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -161,6 +161,17 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
161 return ret; 161 return ret;
162} 162}
163 163
164static int lm25066_write_byte(struct i2c_client *client, int page, u8 value)
165{
166 if (page > 1)
167 return -EINVAL;
168
169 if (page == 0)
170 return pmbus_write_byte(client, 0, value);
171
172 return 0;
173}
174
164static int lm25066_probe(struct i2c_client *client, 175static int lm25066_probe(struct i2c_client *client,
165 const struct i2c_device_id *id) 176 const struct i2c_device_id *id)
166{ 177{
@@ -204,6 +215,7 @@ static int lm25066_probe(struct i2c_client *client,
204 215
205 info->read_word_data = lm25066_read_word_data; 216 info->read_word_data = lm25066_read_word_data;
206 info->write_word_data = lm25066_write_word_data; 217 info->write_word_data = lm25066_write_word_data;
218 info->write_byte = lm25066_write_byte;
207 219
208 switch (id->driver_data) { 220 switch (id->driver_data) {
209 case lm25066: 221 case lm25066:
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 0808d986d75b..a6ae20ffef6b 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -325,6 +325,7 @@ struct pmbus_driver_info {
325 int (*read_word_data)(struct i2c_client *client, int page, int reg); 325 int (*read_word_data)(struct i2c_client *client, int page, int reg);
326 int (*write_word_data)(struct i2c_client *client, int page, int reg, 326 int (*write_word_data)(struct i2c_client *client, int page, int reg,
327 u16 word); 327 u16 word);
328 int (*write_byte)(struct i2c_client *client, int page, u8 value);
328 /* 329 /*
329 * The identify function determines supported PMBus functionality. 330 * The identify function determines supported PMBus functionality.
330 * This function is only necessary if a chip driver supports multiple 331 * This function is only necessary if a chip driver supports multiple
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 5c1b6cf31701..397fc59b5682 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -182,6 +182,24 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
182} 182}
183EXPORT_SYMBOL_GPL(pmbus_write_byte); 183EXPORT_SYMBOL_GPL(pmbus_write_byte);
184 184
185/*
186 * _pmbus_write_byte() is similar to pmbus_write_byte(), but checks if
187 * a device specific mapping function exists and calls it if necessary.
188 */
189static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
190{
191 struct pmbus_data *data = i2c_get_clientdata(client);
192 const struct pmbus_driver_info *info = data->info;
193 int status;
194
195 if (info->write_byte) {
196 status = info->write_byte(client, page, value);
197 if (status != -ENODATA)
198 return status;
199 }
200 return pmbus_write_byte(client, page, value);
201}
202
185int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) 203int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word)
186{ 204{
187 int rv; 205 int rv;
@@ -281,7 +299,7 @@ static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
281 299
282static void pmbus_clear_fault_page(struct i2c_client *client, int page) 300static void pmbus_clear_fault_page(struct i2c_client *client, int page)
283{ 301{
284 pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS); 302 _pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
285} 303}
286 304
287void pmbus_clear_faults(struct i2c_client *client) 305void pmbus_clear_faults(struct i2c_client *client)
@@ -960,6 +978,8 @@ static void pmbus_find_max_attr(struct i2c_client *client,
960struct pmbus_limit_attr { 978struct pmbus_limit_attr {
961 u16 reg; /* Limit register */ 979 u16 reg; /* Limit register */
962 bool update; /* True if register needs updates */ 980 bool update; /* True if register needs updates */
981 bool low; /* True if low limit; for limits with compare
982 functions only */
963 const char *attr; /* Attribute name */ 983 const char *attr; /* Attribute name */
964 const char *alarm; /* Alarm attribute name */ 984 const char *alarm; /* Alarm attribute name */
965 u32 sbit; /* Alarm attribute status bit */ 985 u32 sbit; /* Alarm attribute status bit */
@@ -1011,7 +1031,8 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
1011 if (attr->compare) { 1031 if (attr->compare) {
1012 pmbus_add_boolean_cmp(data, name, 1032 pmbus_add_boolean_cmp(data, name,
1013 l->alarm, index, 1033 l->alarm, index,
1014 cbase, cindex, 1034 l->low ? cindex : cbase,
1035 l->low ? cbase : cindex,
1015 attr->sbase + page, l->sbit); 1036 attr->sbase + page, l->sbit);
1016 } else { 1037 } else {
1017 pmbus_add_boolean_reg(data, name, 1038 pmbus_add_boolean_reg(data, name,
@@ -1348,11 +1369,13 @@ static const struct pmbus_sensor_attr power_attributes[] = {
1348static const struct pmbus_limit_attr temp_limit_attrs[] = { 1369static const struct pmbus_limit_attr temp_limit_attrs[] = {
1349 { 1370 {
1350 .reg = PMBUS_UT_WARN_LIMIT, 1371 .reg = PMBUS_UT_WARN_LIMIT,
1372 .low = true,
1351 .attr = "min", 1373 .attr = "min",
1352 .alarm = "min_alarm", 1374 .alarm = "min_alarm",
1353 .sbit = PB_TEMP_UT_WARNING, 1375 .sbit = PB_TEMP_UT_WARNING,
1354 }, { 1376 }, {
1355 .reg = PMBUS_UT_FAULT_LIMIT, 1377 .reg = PMBUS_UT_FAULT_LIMIT,
1378 .low = true,
1356 .attr = "lcrit", 1379 .attr = "lcrit",
1357 .alarm = "lcrit_alarm", 1380 .alarm = "lcrit_alarm",
1358 .sbit = PB_TEMP_UT_FAULT, 1381 .sbit = PB_TEMP_UT_FAULT,
@@ -1381,11 +1404,13 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {
1381static const struct pmbus_limit_attr temp_limit_attrs23[] = { 1404static const struct pmbus_limit_attr temp_limit_attrs23[] = {
1382 { 1405 {
1383 .reg = PMBUS_UT_WARN_LIMIT, 1406 .reg = PMBUS_UT_WARN_LIMIT,
1407 .low = true,
1384 .attr = "min", 1408 .attr = "min",
1385 .alarm = "min_alarm", 1409 .alarm = "min_alarm",
1386 .sbit = PB_TEMP_UT_WARNING, 1410 .sbit = PB_TEMP_UT_WARNING,
1387 }, { 1411 }, {
1388 .reg = PMBUS_UT_FAULT_LIMIT, 1412 .reg = PMBUS_UT_FAULT_LIMIT,
1413 .low = true,
1389 .attr = "lcrit", 1414 .attr = "lcrit",
1390 .alarm = "lcrit_alarm", 1415 .alarm = "lcrit_alarm",
1391 .sbit = PB_TEMP_UT_FAULT, 1416 .sbit = PB_TEMP_UT_FAULT,
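Two things happen in the pmbus core above: _pmbus_write_byte() lets a chip driver install a write_byte() hook and falls back to the generic transfer only when the hook returns -ENODATA, and the new .low flag swaps the comparison operands for lower-limit attributes so the min/lcrit alarms compare in the opposite direction from the upper limits. A standalone sketch of the hook-with-fallback convention; the fake_info type and chip_write_byte() below are illustrative, not the real pmbus structures:

#include <stdio.h>
#include <errno.h>

struct fake_info {
        int (*write_byte)(int page, unsigned char value);
};

static int generic_write_byte(int page, unsigned char value)
{
        printf("generic write: page=%d value=0x%02x\n", page, value);
        return 0;
}

static int chip_write_byte(int page, unsigned char value)
{
        if (page > 1)
                return -EINVAL;                 /* handled: reject */
        if (page == 0)
                return generic_write_byte(0, value);    /* handled directly */
        return -ENODATA;                        /* no opinion: fall back */
}

static int do_write_byte(const struct fake_info *info, int page,
                         unsigned char value)
{
        if (info->write_byte) {
                int status = info->write_byte(page, value);

                if (status != -ENODATA)
                        return status;
        }
        return generic_write_byte(page, value);
}

int main(void)
{
        struct fake_info info = { .write_byte = chip_write_byte };

        do_write_byte(&info, 1, 0x03);          /* falls back to the generic path */
        return 0;
}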
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index ace1c7319734..d0ddb60155c9 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client,
141 block_buffer[ret] = '\0'; 141 block_buffer[ret] = '\0';
142 dev_info(&client->dev, "Device ID %s\n", block_buffer); 142 dev_info(&client->dev, "Device ID %s\n", block_buffer);
143 143
144 mid = NULL; 144 for (mid = ucd9000_id; mid->name[0]; mid++) {
145 for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) {
146 mid = &ucd9000_id[i];
147 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) 145 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
148 break; 146 break;
149 } 147 }
150 if (!mid || !strlen(mid->name)) { 148 if (!mid->name[0]) {
151 dev_err(&client->dev, "Unsupported device\n"); 149 dev_err(&client->dev, "Unsupported device\n");
152 return -ENODEV; 150 return -ENODEV;
153 } 151 }
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index ffcc1cf3609d..c65e9da707cc 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client,
68 block_buffer[ret] = '\0'; 68 block_buffer[ret] = '\0';
69 dev_info(&client->dev, "Device ID %s\n", block_buffer); 69 dev_info(&client->dev, "Device ID %s\n", block_buffer);
70 70
71 mid = NULL; 71 for (mid = ucd9200_id; mid->name[0]; mid++) {
72 for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) {
73 mid = &ucd9200_id[i];
74 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) 72 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
75 break; 73 break;
76 } 74 }
77 if (!mid || !strlen(mid->name)) { 75 if (!mid->name[0]) {
78 dev_err(&client->dev, "Unsupported device\n"); 76 dev_err(&client->dev, "Unsupported device\n");
79 return -ENODEV; 77 return -ENODEV;
80 } 78 }
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 17cf1ab95521..8c2844e5691c 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_client *client,
329 struct i2c_board_info *info); 329 struct i2c_board_info *info);
330static int w83791d_remove(struct i2c_client *client); 330static int w83791d_remove(struct i2c_client *client);
331 331
332static int w83791d_read(struct i2c_client *client, u8 register); 332static int w83791d_read(struct i2c_client *client, u8 reg);
333static int w83791d_write(struct i2c_client *client, u8 register, u8 value); 333static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
334static struct w83791d_data *w83791d_update_device(struct device *dev); 334static struct w83791d_data *w83791d_update_device(struct device *dev);
335 335
336#ifdef DEBUG 336#ifdef DEBUG
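The w83791d prototype change above is needed because register is a C storage-class keyword: in the old declarations it was parsed as a specifier on an unnamed parameter rather than as a parameter name, and it can never be used as an identifier in a definition. The renamed prototypes now match the definitions, which already use reg. A tiny illustration with a hypothetical function:

#include <stdio.h>

/* "register" is a keyword, so this definition would not compile:
 *     static int read_reg(int client, unsigned char register)
 *     {
 *             return register;
 *     }
 * An ordinary identifier works:
 */
static int read_reg(int client, unsigned char reg)
{
        (void)client;
        return reg;             /* stand-in for an SMBus register read */
}

int main(void)
{
        printf("%d\n", read_reg(0, 0x40));
        return 0;
}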
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
index 5d8aed5ec21b..c01e9519f6c1 100644
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ b/drivers/i2c/busses/i2c-ixp2000.c
@@ -35,7 +35,7 @@
35#include <linux/slab.h> 35#include <linux/slab.h>
36 36
37#include <mach/hardware.h> /* Pick up IXP2000-specific bits */ 37#include <mach/hardware.h> /* Pick up IXP2000-specific bits */
38#include <mach/gpio.h> 38#include <mach/gpio-ixp2000.h>
39 39
40static inline int ixp2000_scl_pin(void *data) 40static inline int ixp2000_scl_pin(void *data)
41{ 41{
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 0c731ca69f15..b228e09c5d05 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -146,6 +146,7 @@ struct i2c_nmk_client {
146 * @stop: stop condition 146 * @stop: stop condition
147 * @xfer_complete: acknowledge completion for a I2C message 147 * @xfer_complete: acknowledge completion for a I2C message
148 * @result: controller propagated result 148 * @result: controller propagated result
149 * @regulator: pointer to i2c regulator
149 * @busy: Busy doing transfer 150 * @busy: Busy doing transfer
150 */ 151 */
151struct nmk_i2c_dev { 152struct nmk_i2c_dev {
@@ -417,12 +418,12 @@ static int read_i2c(struct nmk_i2c_dev *dev)
417 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, 418 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
418 dev->virtbase + I2C_IMSCR); 419 dev->virtbase + I2C_IMSCR);
419 420
420 timeout = wait_for_completion_interruptible_timeout( 421 timeout = wait_for_completion_timeout(
421 &dev->xfer_complete, dev->adap.timeout); 422 &dev->xfer_complete, dev->adap.timeout);
422 423
423 if (timeout < 0) { 424 if (timeout < 0) {
424 dev_err(&dev->pdev->dev, 425 dev_err(&dev->pdev->dev,
425 "wait_for_completion_interruptible_timeout" 426 "wait_for_completion_timeout"
426 "returned %d waiting for event\n", timeout); 427 "returned %d waiting for event\n", timeout);
427 status = timeout; 428 status = timeout;
428 } 429 }
@@ -504,12 +505,12 @@ static int write_i2c(struct nmk_i2c_dev *dev)
504 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, 505 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
505 dev->virtbase + I2C_IMSCR); 506 dev->virtbase + I2C_IMSCR);
506 507
507 timeout = wait_for_completion_interruptible_timeout( 508 timeout = wait_for_completion_timeout(
508 &dev->xfer_complete, dev->adap.timeout); 509 &dev->xfer_complete, dev->adap.timeout);
509 510
510 if (timeout < 0) { 511 if (timeout < 0) {
511 dev_err(&dev->pdev->dev, 512 dev_err(&dev->pdev->dev,
512 "wait_for_completion_interruptible_timeout" 513 "wait_for_completion_timeout "
513 "returned %d waiting for event\n", timeout); 514 "returned %d waiting for event\n", timeout);
514 status = timeout; 515 status = timeout;
515 } 516 }
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 1a766cf74f6b..2dfb63176856 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1139,41 +1139,12 @@ omap_i2c_remove(struct platform_device *pdev)
1139 return 0; 1139 return 0;
1140} 1140}
1141 1141
1142#ifdef CONFIG_SUSPEND
1143static int omap_i2c_suspend(struct device *dev)
1144{
1145 if (!pm_runtime_suspended(dev))
1146 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
1147 dev->bus->pm->runtime_suspend(dev);
1148
1149 return 0;
1150}
1151
1152static int omap_i2c_resume(struct device *dev)
1153{
1154 if (!pm_runtime_suspended(dev))
1155 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
1156 dev->bus->pm->runtime_resume(dev);
1157
1158 return 0;
1159}
1160
1161static struct dev_pm_ops omap_i2c_pm_ops = {
1162 .suspend = omap_i2c_suspend,
1163 .resume = omap_i2c_resume,
1164};
1165#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
1166#else
1167#define OMAP_I2C_PM_OPS NULL
1168#endif
1169
1170static struct platform_driver omap_i2c_driver = { 1142static struct platform_driver omap_i2c_driver = {
1171 .probe = omap_i2c_probe, 1143 .probe = omap_i2c_probe,
1172 .remove = omap_i2c_remove, 1144 .remove = omap_i2c_remove,
1173 .driver = { 1145 .driver = {
1174 .name = "omap_i2c", 1146 .name = "omap_i2c",
1175 .owner = THIS_MODULE, 1147 .owner = THIS_MODULE,
1176 .pm = OMAP_I2C_PM_OPS,
1177 }, 1148 },
1178}; 1149};
1179 1150
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 6659d269b841..b73da6cd6f91 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
109 return -EINVAL; 109 return -EINVAL;
110 } 110 }
111 sds = kzalloc(sizeof(*sds), GFP_KERNEL); 111 sds = kzalloc(sizeof(*sds), GFP_KERNEL);
112 if (!sds) 112 if (!sds) {
113 ret = -ENOMEM;
113 goto err_mem; 114 goto err_mem;
115 }
114 116
115 for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) { 117 for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
116 sds->pdev[i] = add_i2c_device(dev, i); 118 sds->pdev[i] = add_i2c_device(dev, i);
117 if (IS_ERR(sds->pdev[i])) { 119 if (IS_ERR(sds->pdev[i])) {
120 ret = PTR_ERR(sds->pdev[i]);
118 while (--i >= 0) 121 while (--i >= 0)
119 platform_device_unregister(sds->pdev[i]); 122 platform_device_unregister(sds->pdev[i]);
120 goto err_dev_add; 123 goto err_dev_add;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 2440b7411978..3c94c4a81a55 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
270 270
271 /* Rounds down to not include partial word at the end of buf */ 271 /* Rounds down to not include partial word at the end of buf */
272 words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; 272 words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
273 if (words_to_transfer > tx_fifo_avail)
274 words_to_transfer = tx_fifo_avail;
275 273
276 i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); 274 /* It's very common to have < 4 bytes, so optimize that case. */
277 275 if (words_to_transfer) {
278 buf += words_to_transfer * BYTES_PER_FIFO_WORD; 276 if (words_to_transfer > tx_fifo_avail)
279 buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; 277 words_to_transfer = tx_fifo_avail;
280 tx_fifo_avail -= words_to_transfer; 278
279 /*
280 * Update state before writing to FIFO. If this causes us
281 * to finish writing all bytes (AKA buf_remaining goes to 0) we
282 * have a potential for an interrupt (PACKET_XFER_COMPLETE is
283 * not maskable). We need to make sure that the isr sees
284 * buf_remaining as 0 and doesn't call us back re-entrantly.
285 */
286 buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
287 tx_fifo_avail -= words_to_transfer;
288 i2c_dev->msg_buf_remaining = buf_remaining;
289 i2c_dev->msg_buf = buf +
290 words_to_transfer * BYTES_PER_FIFO_WORD;
291 barrier();
292
293 i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
294
295 buf += words_to_transfer * BYTES_PER_FIFO_WORD;
296 }
281 297
282 /* 298 /*
283 * If there is a partial word at the end of buf, handle it manually to 299 * If there is a partial word at the end of buf, handle it manually to
@@ -287,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
287 if (tx_fifo_avail > 0 && buf_remaining > 0) { 303 if (tx_fifo_avail > 0 && buf_remaining > 0) {
288 BUG_ON(buf_remaining > 3); 304 BUG_ON(buf_remaining > 3);
289 memcpy(&val, buf, buf_remaining); 305 memcpy(&val, buf, buf_remaining);
306
307 /* Again update before writing to FIFO to make sure isr sees. */
308 i2c_dev->msg_buf_remaining = 0;
309 i2c_dev->msg_buf = NULL;
310 barrier();
311
290 i2c_writel(i2c_dev, val, I2C_TX_FIFO); 312 i2c_writel(i2c_dev, val, I2C_TX_FIFO);
291 buf_remaining = 0;
292 tx_fifo_avail--;
293 } 313 }
294 314
295 BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
296 i2c_dev->msg_buf_remaining = buf_remaining;
297 i2c_dev->msg_buf = buf;
298 return 0; 315 return 0;
299} 316}
300 317
@@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
411 tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ); 428 tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
412 } 429 }
413 430
414 if ((status & I2C_INT_PACKET_XFER_COMPLETE) && 431 if (status & I2C_INT_PACKET_XFER_COMPLETE) {
415 !i2c_dev->msg_buf_remaining) 432 BUG_ON(i2c_dev->msg_buf_remaining);
416 complete(&i2c_dev->msg_complete); 433 complete(&i2c_dev->msg_complete);
434 }
417 435
418 i2c_writel(i2c_dev, status, I2C_INT_STATUS); 436 i2c_writel(i2c_dev, status, I2C_INT_STATUS);
419 if (i2c_dev->is_dvc) 437 if (i2c_dev->is_dvc)
@@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
531 549
532static u32 tegra_i2c_func(struct i2c_adapter *adap) 550static u32 tegra_i2c_func(struct i2c_adapter *adap)
533{ 551{
534 return I2C_FUNC_I2C; 552 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
535} 553}
536 554
537static const struct i2c_algorithm tegra_i2c_algo = { 555static const struct i2c_algorithm tegra_i2c_algo = {
@@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev)
719} 737}
720#endif 738#endif
721 739
740#if defined(CONFIG_OF)
741/* Match table for of_platform binding */
742static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
743 { .compatible = "nvidia,tegra20-i2c", },
744 {},
745};
746MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
747#else
748#define tegra_i2c_of_match NULL
749#endif
750
722static struct platform_driver tegra_i2c_driver = { 751static struct platform_driver tegra_i2c_driver = {
723 .probe = tegra_i2c_probe, 752 .probe = tegra_i2c_probe,
724 .remove = tegra_i2c_remove, 753 .remove = tegra_i2c_remove,
@@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = {
729 .driver = { 758 .driver = {
730 .name = "tegra-i2c", 759 .name = "tegra-i2c",
731 .owner = THIS_MODULE, 760 .owner = THIS_MODULE,
761 .of_match_table = tegra_i2c_of_match,
732 }, 762 },
733}; 763};
734 764
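
The tegra_i2c_fill_tx_fifo() changes above commit msg_buf_remaining and msg_buf before the FIFO write, with barrier() in between, because PACKET_XFER_COMPLETE is not maskable and the ISR can fire as soon as the last word lands; the ISR then asserts with BUG_ON() instead of silently racing. A compressed, driver-agnostic sketch of that ordering (hypothetical names; barrier() here is only a compiler barrier, which is all the pattern needs on a single CPU):

#include <assert.h>
#include <stddef.h>

#define barrier()	__asm__ __volatile__("" ::: "memory")

struct xfer {
	const unsigned char *buf;	/* next byte to queue */
	size_t remaining;		/* bytes not yet queued to the FIFO */
};

/* Hypothetical FIFO push; the driver uses i2c_writesl() here. */
static void fifo_push(const unsigned char *p, size_t n)
{
	(void)p;
	(void)n;
}

/* Interrupt-context handler, mirroring BUG_ON(i2c_dev->msg_buf_remaining). */
static void completion_handler(const struct xfer *x)
{
	assert(x->remaining == 0);
}

static void fill_fifo(struct xfer *x, size_t room)
{
	size_t n = x->remaining < room ? x->remaining : room;
	const unsigned char *p = x->buf;

	/*
	 * Commit the bookkeeping first: pushing the final bytes can raise a
	 * non-maskable completion interrupt at once, and the handler must
	 * not observe stale remaining/buf values.
	 */
	x->remaining -= n;
	x->buf += n;
	barrier();	/* keep the compiler from sinking the update past the push */

	fifo_push(p, n);
}
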
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
index 000a78e5246c..6dede8f366c5 100644
--- a/drivers/ide/at91_ide.c
+++ b/drivers/ide/at91_ide.c
@@ -28,7 +28,7 @@
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29 29
30#include <mach/board.h> 30#include <mach/board.h>
31#include <mach/gpio.h> 31#include <asm/gpio.h>
32#include <mach/at91sam9_smc.h> 32#include <mach/at91sam9_smc.h>
33 33
34#define DRV_NAME "at91_ide" 34#define DRV_NAME "at91_ide"
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 274798068a54..16f69be820c7 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
435 if (!(rq->cmd_flags & REQ_FLUSH)) 435 if (!(rq->cmd_flags & REQ_FLUSH))
436 return BLKPREP_OK; 436 return BLKPREP_OK;
437 437
438 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); 438 if (rq->special) {
439 cmd = rq->special;
440 memset(cmd, 0, sizeof(*cmd));
441 } else {
442 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
443 }
439 444
440 /* FIXME: map struct ide_taskfile on rq->cmd[] */ 445 /* FIXME: map struct ide_taskfile on rq->cmd[] */
441 BUG_ON(cmd == NULL); 446 BUG_ON(cmd == NULL);
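
The idedisk_prep_fn() hunk above reuses a command structure already attached to rq->special, clearing it with memset(), and only falls back to kzalloc(GFP_ATOMIC) when there is nothing to reuse, so the flush path no longer leans on an atomic allocation that can fail under memory pressure. A small sketch of the reuse-or-allocate pattern, with a hypothetical struct cmd:

#include <stdlib.h>
#include <string.h>

struct cmd { int opcode; unsigned char taskfile[16]; };	/* hypothetical */

/*
 * Reuse caller-provided per-request storage when it exists; only fall back
 * to a fresh (and potentially failing) allocation when there is nothing
 * to reuse.
 */
static struct cmd *get_cmd(void *preallocated)
{
	struct cmd *cmd;

	if (preallocated) {
		cmd = preallocated;
		memset(cmd, 0, sizeof(*cmd));	/* same net effect as kzalloc() */
	} else {
		cmd = calloc(1, sizeof(*cmd));
	}
	return cmd;		/* may still be NULL on the fallback path */
}
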
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 17bf9d95463c..6cd642aaa4de 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -287,7 +287,7 @@ void __free_ep(struct kref *kref)
287 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { 287 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
288 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); 288 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
289 dst_release(ep->dst); 289 dst_release(ep->dst);
290 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 290 l2t_release(ep->com.tdev, ep->l2t);
291 } 291 }
292 kfree(ep); 292 kfree(ep);
293} 293}
@@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1178 release_tid(ep->com.tdev, GET_TID(rpl), NULL); 1178 release_tid(ep->com.tdev, GET_TID(rpl), NULL);
1179 cxgb3_free_atid(ep->com.tdev, ep->atid); 1179 cxgb3_free_atid(ep->com.tdev, ep->atid);
1180 dst_release(ep->dst); 1180 dst_release(ep->dst);
1181 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 1181 l2t_release(ep->com.tdev, ep->l2t);
1182 put_ep(&ep->com); 1182 put_ep(&ep->com);
1183 return CPL_RET_BUF_DONE; 1183 return CPL_RET_BUF_DONE;
1184} 1184}
@@ -1377,7 +1377,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1377 if (!child_ep) { 1377 if (!child_ep) {
1378 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 1378 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
1379 __func__); 1379 __func__);
1380 l2t_release(L2DATA(tdev), l2t); 1380 l2t_release(tdev, l2t);
1381 dst_release(dst); 1381 dst_release(dst);
1382 goto reject; 1382 goto reject;
1383 } 1383 }
@@ -1956,7 +1956,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1956 if (!err) 1956 if (!err)
1957 goto out; 1957 goto out;
1958 1958
1959 l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t); 1959 l2t_release(h->rdev.t3cdev_p, ep->l2t);
1960fail4: 1960fail4:
1961 dst_release(ep->dst); 1961 dst_release(ep->dst);
1962fail3: 1962fail3:
@@ -2127,7 +2127,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2127 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, 2127 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2128 l2t); 2128 l2t);
2129 dst_hold(new); 2129 dst_hold(new);
2130 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 2130 l2t_release(ep->com.tdev, ep->l2t);
2131 ep->l2t = l2t; 2131 ep->l2t = l2t;
2132 dst_release(old); 2132 dst_release(old);
2133 ep->dst = new; 2133 ep->dst = new;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 43f89ba0a908..fe89c4660d55 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -717,11 +717,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
717{ 717{
718 struct ipoib_dev_priv *priv = netdev_priv(dev); 718 struct ipoib_dev_priv *priv = netdev_priv(dev);
719 struct ipoib_neigh *neigh; 719 struct ipoib_neigh *neigh;
720 struct neighbour *n; 720 struct neighbour *n = NULL;
721 unsigned long flags; 721 unsigned long flags;
722 722
723 n = dst_get_neighbour(skb_dst(skb)); 723 if (likely(skb_dst(skb)))
724 if (likely(skb_dst(skb) && n)) { 724 n = dst_get_neighbour(skb_dst(skb));
725
726 if (likely(n)) {
725 if (unlikely(!*to_ipoib_neigh(n))) { 727 if (unlikely(!*to_ipoib_neigh(n))) {
726 ipoib_path_lookup(skb, dev); 728 ipoib_path_lookup(skb, dev);
727 return NETDEV_TX_OK; 729 return NETDEV_TX_OK;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 8db008de5392..9c61b9c2c597 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -101,13 +101,17 @@ iscsi_iser_recv(struct iscsi_conn *conn,
101 101
102 /* verify PDU length */ 102 /* verify PDU length */
103 datalen = ntoh24(hdr->dlength); 103 datalen = ntoh24(hdr->dlength);
104 if (datalen != rx_data_len) { 104 if (datalen > rx_data_len || (datalen + 4) < rx_data_len) {
105 printk(KERN_ERR "iscsi_iser: datalen %d (hdr) != %d (IB) \n", 105 iser_err("wrong datalen %d (hdr), %d (IB)\n",
106 datalen, rx_data_len); 106 datalen, rx_data_len);
107 rc = ISCSI_ERR_DATALEN; 107 rc = ISCSI_ERR_DATALEN;
108 goto error; 108 goto error;
109 } 109 }
110 110
111 if (datalen != rx_data_len)
112 iser_dbg("aligned datalen (%d) hdr, %d (IB)\n",
113 datalen, rx_data_len);
114
111 /* read AHS */ 115 /* read AHS */
112 ahslen = hdr->hlength * 4; 116 ahslen = hdr->hlength * 4;
113 117
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 342cbc1bdaae..db6f3ce9f3bf 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -89,7 +89,7 @@
89 } while (0) 89 } while (0)
90 90
91#define SHIFT_4K 12 91#define SHIFT_4K 12
92#define SIZE_4K (1UL << SHIFT_4K) 92#define SIZE_4K (1ULL << SHIFT_4K)
93#define MASK_4K (~(SIZE_4K-1)) 93#define MASK_4K (~(SIZE_4K-1))
94 94
95 /* support up to 512KB in one RDMA */ 95 /* support up to 512KB in one RDMA */
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 5745b7fe158c..f299de6b419b 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -412,7 +412,7 @@ int iser_send_control(struct iscsi_conn *conn,
412 memcpy(iser_conn->ib_conn->login_buf, task->data, 412 memcpy(iser_conn->ib_conn->login_buf, task->data,
413 task->data_count); 413 task->data_count);
414 tx_dsg->addr = iser_conn->ib_conn->login_dma; 414 tx_dsg->addr = iser_conn->ib_conn->login_dma;
415 tx_dsg->length = data_seg_len; 415 tx_dsg->length = task->data_count;
416 tx_dsg->lkey = device->mr->lkey; 416 tx_dsg->lkey = device->mr->lkey;
417 mdesc->num_sge = 2; 417 mdesc->num_sge = 2;
418 } 418 }
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 9882971827e6..358cd7ee905b 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -139,7 +139,7 @@ struct analog_port {
139#include <linux/i8253.h> 139#include <linux/i8253.h>
140 140
141#define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0) 141#define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0)
142#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? CLOCK_TICK_RATE / HZ : 0))) 142#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
143#define TIME_NAME (cpu_has_tsc?"TSC":"PIT") 143#define TIME_NAME (cpu_has_tsc?"TSC":"PIT")
144static unsigned int get_time_pit(void) 144static unsigned int get_time_pit(void)
145{ 145{
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 7b404e5443ed..e34eeb8ae371 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -668,4 +668,3 @@ module_exit(adp5588_exit);
668MODULE_LICENSE("GPL"); 668MODULE_LICENSE("GPL");
669MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 669MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
670MODULE_DESCRIPTION("ADP5588/87 Keypad driver"); 670MODULE_DESCRIPTION("ADP5588/87 Keypad driver");
671MODULE_ALIAS("platform:adp5588-keys");
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index c8242dd190d0..aa17e024d803 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -20,6 +20,7 @@
20 * flag. 20 * flag.
21 */ 21 */
22 22
23#include <linux/module.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25#include <linux/clk.h> 26#include <linux/clk.h>
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 33d0bdc837c0..323bcdfff248 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -35,7 +35,7 @@
35#include <linux/mutex.h> 35#include <linux/mutex.h>
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <mach/gpio.h> 38#include <asm/gpio.h>
39#include <plat/keypad.h> 39#include <plat/keypad.h>
40#include <plat/menelaus.h> 40#include <plat/menelaus.h>
41#include <asm/irq.h> 41#include <asm/irq.h>
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index f270447ba951..a5a77915c650 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -702,7 +702,7 @@ err_iounmap:
702err_free_mem_region: 702err_free_mem_region:
703 release_mem_region(res->start, resource_size(res)); 703 release_mem_region(res->start, resource_size(res));
704err_free_mem: 704err_free_mem:
705 input_free_device(kbc->idev); 705 input_free_device(input_dev);
706 kfree(kbc); 706 kfree(kbc);
707 707
708 return err; 708 return err;
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index e21deb1baa8a..025417d74ca2 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * AD714X CapTouch Programmable Controller driver (I2C bus) 2 * AD714X CapTouch Programmable Controller driver (I2C bus)
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2011 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -27,54 +27,49 @@ static int ad714x_i2c_resume(struct device *dev)
27 27
28static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume); 28static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume);
29 29
30static int ad714x_i2c_write(struct device *dev, unsigned short reg, 30static int ad714x_i2c_write(struct ad714x_chip *chip,
31 unsigned short data) 31 unsigned short reg, unsigned short data)
32{ 32{
33 struct i2c_client *client = to_i2c_client(dev); 33 struct i2c_client *client = to_i2c_client(chip->dev);
34 int ret = 0; 34 int error;
35 u8 *_reg = (u8 *)&reg; 35
36 u8 *_data = (u8 *)&data; 36 chip->xfer_buf[0] = cpu_to_be16(reg);
37 37 chip->xfer_buf[1] = cpu_to_be16(data);
38 u8 tx[4] = { 38
39 _reg[1], 39 error = i2c_master_send(client, (u8 *)chip->xfer_buf,
40 _reg[0], 40 2 * sizeof(*chip->xfer_buf));
41 _data[1], 41 if (unlikely(error < 0)) {
42 _data[0] 42 dev_err(&client->dev, "I2C write error: %d\n", error);
43 }; 43 return error;
44 44 }
45 ret = i2c_master_send(client, tx, 4); 45
46 if (ret < 0) 46 return 0;
47 dev_err(&client->dev, "I2C write error\n");
48
49 return ret;
50} 47}
51 48
52static int ad714x_i2c_read(struct device *dev, unsigned short reg, 49static int ad714x_i2c_read(struct ad714x_chip *chip,
53 unsigned short *data) 50 unsigned short reg, unsigned short *data, size_t len)
54{ 51{
55 struct i2c_client *client = to_i2c_client(dev); 52 struct i2c_client *client = to_i2c_client(chip->dev);
56 int ret = 0; 53 int i;
57 u8 *_reg = (u8 *)&reg; 54 int error;
58 u8 *_data = (u8 *)data; 55
59 56 chip->xfer_buf[0] = cpu_to_be16(reg);
60 u8 tx[2] = { 57
61 _reg[1], 58 error = i2c_master_send(client, (u8 *)chip->xfer_buf,
62 _reg[0] 59 sizeof(*chip->xfer_buf));
63 }; 60 if (error >= 0)
64 u8 rx[2]; 61 error = i2c_master_recv(client, (u8 *)chip->xfer_buf,
65 62 len * sizeof(*chip->xfer_buf));
66 ret = i2c_master_send(client, tx, 2); 63
67 if (ret >= 0) 64 if (unlikely(error < 0)) {
68 ret = i2c_master_recv(client, rx, 2); 65 dev_err(&client->dev, "I2C read error: %d\n", error);
69 66 return error;
70 if (unlikely(ret < 0)) {
71 dev_err(&client->dev, "I2C read error\n");
72 } else {
73 _data[0] = rx[1];
74 _data[1] = rx[0];
75 } 67 }
76 68
77 return ret; 69 for (i = 0; i < len; i++)
70 data[i] = be16_to_cpu(chip->xfer_buf[i]);
71
72 return 0;
78} 73}
79 74
80static int __devinit ad714x_i2c_probe(struct i2c_client *client, 75static int __devinit ad714x_i2c_probe(struct i2c_client *client,
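
The rewritten ad714x I2C accessors above operate on the chip object, stage register addresses and data as big-endian 16-bit words in the persistent chip->xfer_buf, and let one read cover several consecutive registers instead of one bus round trip each. A bus-agnostic sketch of the same endian and buffer handling, with hypothetical bus_send()/bus_recv() primitives standing in for i2c_master_send()/i2c_master_recv():

#include <arpa/inet.h>	/* htons()/ntohs() for the 16-bit byte order swaps */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define XFER_WORDS 16

struct chip {
	uint16_t xfer_buf[XFER_WORDS];	/* persistent, reused for every transfer */
};

/* Hypothetical byte-oriented bus primitives, 0 on success. */
static int bus_send(const void *buf, size_t len) { (void)buf; (void)len; return 0; }
static int bus_recv(void *buf, size_t len) { (void)buf; (void)len; return 0; }

static int chip_read(struct chip *c, uint16_t reg, uint16_t *data, size_t len)
{
	size_t i;
	int err;

	if (len > XFER_WORDS)
		return -EINVAL;

	c->xfer_buf[0] = htons(reg);		/* register address goes out big-endian */
	err = bus_send(c->xfer_buf, sizeof(c->xfer_buf[0]));
	if (!err)
		err = bus_recv(c->xfer_buf, len * sizeof(c->xfer_buf[0]));
	if (err)
		return err;

	for (i = 0; i < len; i++)		/* convert the reply back to host order */
		data[i] = ntohs(c->xfer_buf[i]);
	return 0;
}
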
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 4120dd549305..875b50811361 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * AD714X CapTouch Programmable Controller driver (SPI bus) 2 * AD714X CapTouch Programmable Controller driver (SPI bus)
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2011 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9#include <linux/input.h> /* BUS_I2C */ 9#include <linux/input.h> /* BUS_SPI */
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/spi/spi.h> 11#include <linux/spi/spi.h>
12#include <linux/pm.h> 12#include <linux/pm.h>
@@ -30,30 +30,68 @@ static int ad714x_spi_resume(struct device *dev)
30 30
31static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume); 31static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume);
32 32
33static int ad714x_spi_read(struct device *dev, unsigned short reg, 33static int ad714x_spi_read(struct ad714x_chip *chip,
34 unsigned short *data) 34 unsigned short reg, unsigned short *data, size_t len)
35{ 35{
36 struct spi_device *spi = to_spi_device(dev); 36 struct spi_device *spi = to_spi_device(chip->dev);
37 unsigned short tx = AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | reg; 37 struct spi_message message;
38 struct spi_transfer xfer[2];
39 int i;
40 int error;
41
42 spi_message_init(&message);
43 memset(xfer, 0, sizeof(xfer));
44
45 chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX |
46 AD714x_SPI_READ | reg);
47 xfer[0].tx_buf = &chip->xfer_buf[0];
48 xfer[0].len = sizeof(chip->xfer_buf[0]);
49 spi_message_add_tail(&xfer[0], &message);
50
51 xfer[1].rx_buf = &chip->xfer_buf[1];
52 xfer[1].len = sizeof(chip->xfer_buf[1]) * len;
53 spi_message_add_tail(&xfer[1], &message);
54
55 error = spi_sync(spi, &message);
56 if (unlikely(error)) {
57 dev_err(chip->dev, "SPI read error: %d\n", error);
58 return error;
59 }
60
61 for (i = 0; i < len; i++)
62 data[i] = be16_to_cpu(chip->xfer_buf[i + 1]);
38 63
39 return spi_write_then_read(spi, (u8 *)&tx, 2, (u8 *)data, 2); 64 return 0;
40} 65}
41 66
42static int ad714x_spi_write(struct device *dev, unsigned short reg, 67static int ad714x_spi_write(struct ad714x_chip *chip,
43 unsigned short data) 68 unsigned short reg, unsigned short data)
44{ 69{
45 struct spi_device *spi = to_spi_device(dev); 70 struct spi_device *spi = to_spi_device(chip->dev);
46 unsigned short tx[2] = { 71 int error;
47 AD714x_SPI_CMD_PREFIX | reg, 72
48 data 73 chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | reg);
49 }; 74 chip->xfer_buf[1] = cpu_to_be16(data);
75
76 error = spi_write(spi, (u8 *)chip->xfer_buf,
77 2 * sizeof(*chip->xfer_buf));
78 if (unlikely(error)) {
79 dev_err(chip->dev, "SPI write error: %d\n", error);
80 return error;
81 }
50 82
51 return spi_write(spi, (u8 *)tx, 4); 83 return 0;
52} 84}
53 85
54static int __devinit ad714x_spi_probe(struct spi_device *spi) 86static int __devinit ad714x_spi_probe(struct spi_device *spi)
55{ 87{
56 struct ad714x_chip *chip; 88 struct ad714x_chip *chip;
89 int err;
90
91 spi->bits_per_word = 8;
92 err = spi_setup(spi);
93 if (err < 0)
94 return err;
57 95
58 chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq, 96 chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq,
59 ad714x_spi_read, ad714x_spi_write); 97 ad714x_spi_read, ad714x_spi_write);
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
index c3a62c42cd28..ca42c7d2a3c7 100644
--- a/drivers/input/misc/ad714x.c
+++ b/drivers/input/misc/ad714x.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A 2 * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2011 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -59,7 +59,6 @@
59#define STAGE11_AMBIENT 0x27D 59#define STAGE11_AMBIENT 0x27D
60 60
61#define PER_STAGE_REG_NUM 36 61#define PER_STAGE_REG_NUM 36
62#define STAGE_NUM 12
63#define STAGE_CFGREG_NUM 8 62#define STAGE_CFGREG_NUM 8
64#define SYS_CFGREG_NUM 8 63#define SYS_CFGREG_NUM 8
65 64
@@ -124,27 +123,6 @@ struct ad714x_driver_data {
124 * information to integrate all things which will be private data 123 * information to integrate all things which will be private data
125 * of spi/i2c device 124 * of spi/i2c device
126 */ 125 */
127struct ad714x_chip {
128 unsigned short h_state;
129 unsigned short l_state;
130 unsigned short c_state;
131 unsigned short adc_reg[STAGE_NUM];
132 unsigned short amb_reg[STAGE_NUM];
133 unsigned short sensor_val[STAGE_NUM];
134
135 struct ad714x_platform_data *hw;
136 struct ad714x_driver_data *sw;
137
138 int irq;
139 struct device *dev;
140 ad714x_read_t read;
141 ad714x_write_t write;
142
143 struct mutex mutex;
144
145 unsigned product;
146 unsigned version;
147};
148 126
149static void ad714x_use_com_int(struct ad714x_chip *ad714x, 127static void ad714x_use_com_int(struct ad714x_chip *ad714x,
150 int start_stage, int end_stage) 128 int start_stage, int end_stage)
@@ -154,13 +132,13 @@ static void ad714x_use_com_int(struct ad714x_chip *ad714x,
154 132
155 mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); 133 mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1);
156 134
157 ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); 135 ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1);
158 data |= 1 << end_stage; 136 data |= 1 << end_stage;
159 ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); 137 ad714x->write(ad714x, STG_COM_INT_EN_REG, data);
160 138
161 ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); 139 ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1);
162 data &= ~mask; 140 data &= ~mask;
163 ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); 141 ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data);
164} 142}
165 143
166static void ad714x_use_thr_int(struct ad714x_chip *ad714x, 144static void ad714x_use_thr_int(struct ad714x_chip *ad714x,
@@ -171,13 +149,13 @@ static void ad714x_use_thr_int(struct ad714x_chip *ad714x,
171 149
172 mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); 150 mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1);
173 151
174 ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); 152 ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1);
175 data &= ~(1 << end_stage); 153 data &= ~(1 << end_stage);
176 ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); 154 ad714x->write(ad714x, STG_COM_INT_EN_REG, data);
177 155
178 ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); 156 ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1);
179 data |= mask; 157 data |= mask;
180 ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); 158 ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data);
181} 159}
182 160
183static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x, 161static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x,
@@ -273,15 +251,16 @@ static void ad714x_slider_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
273 struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx]; 251 struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
274 int i; 252 int i;
275 253
254 ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage,
255 &ad714x->adc_reg[hw->start_stage],
256 hw->end_stage - hw->start_stage + 1);
257
276 for (i = hw->start_stage; i <= hw->end_stage; i++) { 258 for (i = hw->start_stage; i <= hw->end_stage; i++) {
277 ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, 259 ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
278 &ad714x->adc_reg[i]); 260 &ad714x->amb_reg[i], 1);
279 ad714x->read(ad714x->dev, 261
280 STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, 262 ad714x->sensor_val[i] =
281 &ad714x->amb_reg[i]); 263 abs(ad714x->adc_reg[i] - ad714x->amb_reg[i]);
282
283 ad714x->sensor_val[i] = abs(ad714x->adc_reg[i] -
284 ad714x->amb_reg[i]);
285 } 264 }
286} 265}
287 266
@@ -444,15 +423,16 @@ static void ad714x_wheel_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
444 struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx]; 423 struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
445 int i; 424 int i;
446 425
426 ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage,
427 &ad714x->adc_reg[hw->start_stage],
428 hw->end_stage - hw->start_stage + 1);
429
447 for (i = hw->start_stage; i <= hw->end_stage; i++) { 430 for (i = hw->start_stage; i <= hw->end_stage; i++) {
448 ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, 431 ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
449 &ad714x->adc_reg[i]); 432 &ad714x->amb_reg[i], 1);
450 ad714x->read(ad714x->dev,
451 STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
452 &ad714x->amb_reg[i]);
453 if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) 433 if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
454 ad714x->sensor_val[i] = ad714x->adc_reg[i] - 434 ad714x->sensor_val[i] =
455 ad714x->amb_reg[i]; 435 ad714x->adc_reg[i] - ad714x->amb_reg[i];
456 else 436 else
457 ad714x->sensor_val[i] = 0; 437 ad714x->sensor_val[i] = 0;
458 } 438 }
@@ -597,15 +577,16 @@ static void touchpad_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
597 struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx]; 577 struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
598 int i; 578 int i;
599 579
580 ad714x->read(ad714x, CDC_RESULT_S0 + hw->x_start_stage,
581 &ad714x->adc_reg[hw->x_start_stage],
582 hw->x_end_stage - hw->x_start_stage + 1);
583
600 for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) { 584 for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) {
601 ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, 585 ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
602 &ad714x->adc_reg[i]); 586 &ad714x->amb_reg[i], 1);
603 ad714x->read(ad714x->dev,
604 STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
605 &ad714x->amb_reg[i]);
606 if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) 587 if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
607 ad714x->sensor_val[i] = ad714x->adc_reg[i] - 588 ad714x->sensor_val[i] =
608 ad714x->amb_reg[i]; 589 ad714x->adc_reg[i] - ad714x->amb_reg[i];
609 else 590 else
610 ad714x->sensor_val[i] = 0; 591 ad714x->sensor_val[i] = 0;
611 } 592 }
@@ -891,7 +872,7 @@ static int ad714x_hw_detect(struct ad714x_chip *ad714x)
891{ 872{
892 unsigned short data; 873 unsigned short data;
893 874
894 ad714x->read(ad714x->dev, AD714X_PARTID_REG, &data); 875 ad714x->read(ad714x, AD714X_PARTID_REG, &data, 1);
895 switch (data & 0xFFF0) { 876 switch (data & 0xFFF0) {
896 case AD7142_PARTID: 877 case AD7142_PARTID:
897 ad714x->product = 0x7142; 878 ad714x->product = 0x7142;
@@ -940,23 +921,20 @@ static void ad714x_hw_init(struct ad714x_chip *ad714x)
940 for (i = 0; i < STAGE_NUM; i++) { 921 for (i = 0; i < STAGE_NUM; i++) {
941 reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM; 922 reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM;
942 for (j = 0; j < STAGE_CFGREG_NUM; j++) 923 for (j = 0; j < STAGE_CFGREG_NUM; j++)
943 ad714x->write(ad714x->dev, reg_base + j, 924 ad714x->write(ad714x, reg_base + j,
944 ad714x->hw->stage_cfg_reg[i][j]); 925 ad714x->hw->stage_cfg_reg[i][j]);
945 } 926 }
946 927
947 for (i = 0; i < SYS_CFGREG_NUM; i++) 928 for (i = 0; i < SYS_CFGREG_NUM; i++)
948 ad714x->write(ad714x->dev, AD714X_SYSCFG_REG + i, 929 ad714x->write(ad714x, AD714X_SYSCFG_REG + i,
949 ad714x->hw->sys_cfg_reg[i]); 930 ad714x->hw->sys_cfg_reg[i]);
950 for (i = 0; i < SYS_CFGREG_NUM; i++) 931 for (i = 0; i < SYS_CFGREG_NUM; i++)
951 ad714x->read(ad714x->dev, AD714X_SYSCFG_REG + i, 932 ad714x->read(ad714x, AD714X_SYSCFG_REG + i, &data, 1);
952 &data);
953 933
954 ad714x->write(ad714x->dev, AD714X_STG_CAL_EN_REG, 0xFFF); 934 ad714x->write(ad714x, AD714X_STG_CAL_EN_REG, 0xFFF);
955 935
956 /* clear all interrupts */ 936 /* clear all interrupts */
957 ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); 937 ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);
958 ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
959 ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
960} 938}
961 939
962static irqreturn_t ad714x_interrupt_thread(int irq, void *data) 940static irqreturn_t ad714x_interrupt_thread(int irq, void *data)
@@ -966,9 +944,7 @@ static irqreturn_t ad714x_interrupt_thread(int irq, void *data)
966 944
967 mutex_lock(&ad714x->mutex); 945 mutex_lock(&ad714x->mutex);
968 946
969 ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &ad714x->l_state); 947 ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);
970 ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &ad714x->h_state);
971 ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &ad714x->c_state);
972 948
973 for (i = 0; i < ad714x->hw->button_num; i++) 949 for (i = 0; i < ad714x->hw->button_num; i++)
974 ad714x_button_state_machine(ad714x, i); 950 ad714x_button_state_machine(ad714x, i);
@@ -1245,7 +1221,7 @@ int ad714x_disable(struct ad714x_chip *ad714x)
1245 mutex_lock(&ad714x->mutex); 1221 mutex_lock(&ad714x->mutex);
1246 1222
1247 data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3; 1223 data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3;
1248 ad714x->write(ad714x->dev, AD714X_PWR_CTRL, data); 1224 ad714x->write(ad714x, AD714X_PWR_CTRL, data);
1249 1225
1250 mutex_unlock(&ad714x->mutex); 1226 mutex_unlock(&ad714x->mutex);
1251 1227
@@ -1255,24 +1231,20 @@ EXPORT_SYMBOL(ad714x_disable);
1255 1231
1256int ad714x_enable(struct ad714x_chip *ad714x) 1232int ad714x_enable(struct ad714x_chip *ad714x)
1257{ 1233{
1258 unsigned short data;
1259
1260 dev_dbg(ad714x->dev, "%s enter\n", __func__); 1234 dev_dbg(ad714x->dev, "%s enter\n", __func__);
1261 1235
1262 mutex_lock(&ad714x->mutex); 1236 mutex_lock(&ad714x->mutex);
1263 1237
1264 /* resume to non-shutdown mode */ 1238 /* resume to non-shutdown mode */
1265 1239
1266 ad714x->write(ad714x->dev, AD714X_PWR_CTRL, 1240 ad714x->write(ad714x, AD714X_PWR_CTRL,
1267 ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]); 1241 ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]);
1268 1242
1269 /* make sure the interrupt output line is not low level after resume, 1243 /* make sure the interrupt output line is not low level after resume,
1270 * otherwise we will get no chance to enter falling-edge irq again 1244 * otherwise we will get no chance to enter falling-edge irq again
1271 */ 1245 */
1272 1246
1273 ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); 1247 ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);
1274 ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
1275 ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
1276 1248
1277 mutex_unlock(&ad714x->mutex); 1249 mutex_unlock(&ad714x->mutex);
1278 1250
diff --git a/drivers/input/misc/ad714x.h b/drivers/input/misc/ad714x.h
index 45c54fb13f07..3c85455aa66d 100644
--- a/drivers/input/misc/ad714x.h
+++ b/drivers/input/misc/ad714x.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * AD714X CapTouch Programmable Controller driver (bus interfaces) 2 * AD714X CapTouch Programmable Controller driver (bus interfaces)
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2011 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -11,11 +11,40 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14#define STAGE_NUM 12
15
14struct device; 16struct device;
17struct ad714x_platform_data;
18struct ad714x_driver_data;
15struct ad714x_chip; 19struct ad714x_chip;
16 20
17typedef int (*ad714x_read_t)(struct device *, unsigned short, unsigned short *); 21typedef int (*ad714x_read_t)(struct ad714x_chip *, unsigned short, unsigned short *, size_t);
18typedef int (*ad714x_write_t)(struct device *, unsigned short, unsigned short); 22typedef int (*ad714x_write_t)(struct ad714x_chip *, unsigned short, unsigned short);
23
24struct ad714x_chip {
25 unsigned short l_state;
26 unsigned short h_state;
27 unsigned short c_state;
28 unsigned short adc_reg[STAGE_NUM];
29 unsigned short amb_reg[STAGE_NUM];
30 unsigned short sensor_val[STAGE_NUM];
31
32 struct ad714x_platform_data *hw;
33 struct ad714x_driver_data *sw;
34
35 int irq;
36 struct device *dev;
37 ad714x_read_t read;
38 ad714x_write_t write;
39
40 struct mutex mutex;
41
42 unsigned product;
43 unsigned version;
44
45 __be16 xfer_buf[16] ____cacheline_aligned;
46
47};
19 48
20int ad714x_disable(struct ad714x_chip *ad714x); 49int ad714x_disable(struct ad714x_chip *ad714x);
21int ad714x_enable(struct ad714x_chip *ad714x); 50int ad714x_enable(struct ad714x_chip *ad714x);
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index b09c7d127219..ab860511f016 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -475,7 +475,7 @@ static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on)
475 le16_to_cpu(dev->ctl_req->wIndex), 475 le16_to_cpu(dev->ctl_req->wIndex),
476 dev->ctl_data, 476 dev->ctl_data,
477 USB_PKT_LEN, USB_CTRL_SET_TIMEOUT); 477 USB_PKT_LEN, USB_CTRL_SET_TIMEOUT);
478 if (error && error != EINTR) 478 if (error < 0 && error != -EINTR)
479 err("%s: usb_control_msg() failed %d", __func__, error); 479 err("%s: usb_control_msg() failed %d", __func__, error);
480} 480}
481 481
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 6c76cf792991..0794778295fc 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -234,7 +234,7 @@ static const struct of_device_id mma8450_dt_ids[] = {
234 { .compatible = "fsl,mma8450", }, 234 { .compatible = "fsl,mma8450", },
235 { /* sentinel */ } 235 { /* sentinel */ }
236}; 236};
237MODULE_DEVICE_TABLE(i2c, mma8450_dt_ids); 237MODULE_DEVICE_TABLE(of, mma8450_dt_ids);
238 238
239static struct i2c_driver mma8450_driver = { 239static struct i2c_driver mma8450_driver = {
240 .driver = { 240 .driver = {
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index b95fac15b2ea..f71dc728da58 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -282,7 +282,7 @@ err_free_irq:
282err_pm_set_suspended: 282err_pm_set_suspended:
283 pm_runtime_set_suspended(&client->dev); 283 pm_runtime_set_suspended(&client->dev);
284err_free_mem: 284err_free_mem:
285 input_unregister_device(idev); 285 input_free_device(idev);
286 kfree(sensor); 286 kfree(sensor);
287 return error; 287 return error;
288} 288}
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 3126983c004a..5ec617e28f7e 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -67,6 +67,18 @@
67#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 67#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
68#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 68#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
69#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 69#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
70/* MacbookAir4,1 (unibody, July 2011) */
71#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249
72#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a
73#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b
74/* MacbookAir4,2 (unibody, July 2011) */
75#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
76#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
77#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
78/* Macbook8,2 (unibody) */
79#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
80#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
81#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
70 82
71#define BCM5974_DEVICE(prod) { \ 83#define BCM5974_DEVICE(prod) { \
72 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ 84 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -104,6 +116,18 @@ static const struct usb_device_id bcm5974_table[] = {
104 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), 116 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
105 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), 117 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
106 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), 118 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
119 /* MacbookAir4,1 */
120 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
121 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
122 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
123 /* MacbookAir4,2 */
124 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
125 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
126 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
127 /* MacbookPro8,2 */
128 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
129 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
130 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
107 /* Terminating entry */ 131 /* Terminating entry */
108 {} 132 {}
109}; 133};
@@ -294,6 +318,42 @@ static const struct bcm5974_config bcm5974_config_table[] = {
294 { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, 318 { DIM_X, DIM_X / SN_COORD, -4415, 5050 },
295 { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } 319 { DIM_Y, DIM_Y / SN_COORD, -55, 6680 }
296 }, 320 },
321 {
322 USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI,
323 USB_DEVICE_ID_APPLE_WELLSPRING6_ISO,
324 USB_DEVICE_ID_APPLE_WELLSPRING6_JIS,
325 HAS_INTEGRATED_BUTTON,
326 0x84, sizeof(struct bt_data),
327 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
328 { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
329 { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
330 { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
331 { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
332 },
333 {
334 USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI,
335 USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO,
336 USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS,
337 HAS_INTEGRATED_BUTTON,
338 0x84, sizeof(struct bt_data),
339 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
340 { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
341 { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
342 { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
343 { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
344 },
345 {
346 USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI,
347 USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO,
348 USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
349 HAS_INTEGRATED_BUTTON,
350 0x84, sizeof(struct bt_data),
351 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
352 { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
353 { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
354 { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
355 { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
356 },
297 {} 357 {}
298}; 358};
299 359
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 449c0a46dbac..958b4eb6369d 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -49,6 +49,7 @@ struct hid_descriptor {
49#define USB_REQ_GET_REPORT 0x01 49#define USB_REQ_GET_REPORT 0x01
50#define USB_REQ_SET_REPORT 0x09 50#define USB_REQ_SET_REPORT 0x09
51#define WAC_HID_FEATURE_REPORT 0x03 51#define WAC_HID_FEATURE_REPORT 0x03
52#define WAC_MSG_RETRIES 5
52 53
53static int usb_get_report(struct usb_interface *intf, unsigned char type, 54static int usb_get_report(struct usb_interface *intf, unsigned char type,
54 unsigned char id, void *buf, int size) 55 unsigned char id, void *buf, int size)
@@ -165,7 +166,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
165 report, 166 report,
166 hid_desc->wDescriptorLength, 167 hid_desc->wDescriptorLength,
167 5000); /* 5 secs */ 168 5000); /* 5 secs */
168 } while (result < 0 && limit++ < 5); 169 } while (result < 0 && limit++ < WAC_MSG_RETRIES);
169 170
170 /* No need to parse the Descriptor. It isn't an error though */ 171 /* No need to parse the Descriptor. It isn't an error though */
171 if (result < 0) 172 if (result < 0)
@@ -228,13 +229,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
228 get_unaligned_le16(&report[i + 3]); 229 get_unaligned_le16(&report[i + 3]);
229 i += 4; 230 i += 4;
230 } 231 }
231 } else if (usage == WCM_DIGITIZER) {
232 /* max pressure isn't reported
233 features->pressure_max = (unsigned short)
234 (report[i+4] << 8 | report[i + 3]);
235 */
236 features->pressure_max = 255;
237 i += 4;
238 } 232 }
239 break; 233 break;
240 234
@@ -290,13 +284,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
290 pen = 1; 284 pen = 1;
291 i++; 285 i++;
292 break; 286 break;
293
294 case HID_USAGE_UNDEFINED:
295 if (usage == WCM_DESKTOP && finger) /* capacity */
296 features->pressure_max =
297 get_unaligned_le16(&report[i + 3]);
298 i += 4;
299 break;
300 } 287 }
301 break; 288 break;
302 289
@@ -319,24 +306,26 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
319 int limit = 0, report_id = 2; 306 int limit = 0, report_id = 2;
320 int error = -ENOMEM; 307 int error = -ENOMEM;
321 308
322 rep_data = kmalloc(2, GFP_KERNEL); 309 rep_data = kmalloc(4, GFP_KERNEL);
323 if (!rep_data) 310 if (!rep_data)
324 return error; 311 return error;
325 312
326 /* ask to report tablet data if it is 2FGT Tablet PC or 313 /* ask to report tablet data if it is MT Tablet PC or
327 * not a Tablet PC */ 314 * not a Tablet PC */
328 if (features->type == TABLETPC2FG) { 315 if (features->type == TABLETPC2FG) {
329 do { 316 do {
330 rep_data[0] = 3; 317 rep_data[0] = 3;
331 rep_data[1] = 4; 318 rep_data[1] = 4;
319 rep_data[2] = 0;
320 rep_data[3] = 0;
332 report_id = 3; 321 report_id = 3;
333 error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, 322 error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
334 report_id, rep_data, 2); 323 report_id, rep_data, 4);
335 if (error >= 0) 324 if (error >= 0)
336 error = usb_get_report(intf, 325 error = usb_get_report(intf,
337 WAC_HID_FEATURE_REPORT, report_id, 326 WAC_HID_FEATURE_REPORT, report_id,
338 rep_data, 3); 327 rep_data, 4);
339 } while ((error < 0 || rep_data[1] != 4) && limit++ < 5); 328 } while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);
340 } else if (features->type != TABLETPC) { 329 } else if (features->type != TABLETPC) {
341 do { 330 do {
342 rep_data[0] = 2; 331 rep_data[0] = 2;
@@ -347,7 +336,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
347 error = usb_get_report(intf, 336 error = usb_get_report(intf,
348 WAC_HID_FEATURE_REPORT, report_id, 337 WAC_HID_FEATURE_REPORT, report_id,
349 rep_data, 2); 338 rep_data, 2);
350 } while ((error < 0 || rep_data[1] != 2) && limit++ < 5); 339 } while ((error < 0 || rep_data[1] != 2) && limit++ < WAC_MSG_RETRIES);
351 } 340 }
352 341
353 kfree(rep_data); 342 kfree(rep_data);
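
The wacom_sys.c hunks above replace the bare retry count with a WAC_MSG_RETRIES constant, grow rep_data to the 4 bytes the MT query uses, and drop the old pressure_max guesses from the HID parsing. A compact sketch of the bounded request/verify retry loop those hunks keep, with hypothetical send_mode_request()/read_mode_reply() in place of usb_set_report()/usb_get_report():

#include <errno.h>

#define MSG_RETRIES 5	/* one named limit instead of a bare "5" in every loop */

/* Hypothetical request/reply pair for the mode switch. */
static int send_mode_request(void) { return 0; }
static int read_mode_reply(unsigned char *mode) { *mode = 4; return 0; }

static int switch_tablet_mode(void)
{
	unsigned char mode = 0;
	int tries = 0;
	int err;

	do {
		err = send_mode_request();
		if (err >= 0)
			err = read_mode_reply(&mode);
	} while ((err < 0 || mode != 4) && ++tries < MSG_RETRIES);

	if (err < 0)
		return err;
	return mode == 4 ? 0 : -EIO;	/* gave up after MSG_RETRIES attempts */
}
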
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 03ebcc8b24b5..0dc97ec15c28 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -800,25 +800,26 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
800 int i; 800 int i;
801 801
802 for (i = 0; i < 2; i++) { 802 for (i = 0; i < 2; i++) {
803 int p = data[9 * i + 2]; 803 int offset = (data[1] & 0x80) ? (8 * i) : (9 * i);
804 bool touch = p && !wacom->shared->stylus_in_proximity; 804 bool touch = data[offset + 3] & 0x80;
805 805
806 input_mt_slot(input, i);
807 input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
808 /* 806 /*
809 * Touch events need to be disabled while stylus is 807 * Touch events need to be disabled while stylus is
810 * in proximity because user's hand is resting on touchpad 808 * in proximity because user's hand is resting on touchpad
811 * and sending unwanted events. User expects tablet buttons 809 * and sending unwanted events. User expects tablet buttons
812 * to continue working though. 810 * to continue working though.
813 */ 811 */
812 touch = touch && !wacom->shared->stylus_in_proximity;
813
814 input_mt_slot(input, i);
815 input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
814 if (touch) { 816 if (touch) {
815 int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff; 817 int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff;
816 int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff; 818 int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff;
817 if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) { 819 if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) {
818 x <<= 5; 820 x <<= 5;
819 y <<= 5; 821 y <<= 5;
820 } 822 }
821 input_report_abs(input, ABS_MT_PRESSURE, p);
822 input_report_abs(input, ABS_MT_POSITION_X, x); 823 input_report_abs(input, ABS_MT_POSITION_X, x);
823 input_report_abs(input, ABS_MT_POSITION_Y, y); 824 input_report_abs(input, ABS_MT_POSITION_Y, y);
824 } 825 }
@@ -1056,10 +1057,11 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1056 features->x_fuzz, 0); 1057 features->x_fuzz, 0);
1057 input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 1058 input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
1058 features->y_fuzz, 0); 1059 features->y_fuzz, 0);
1059 input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
1060 features->pressure_fuzz, 0);
1061 1060
1062 if (features->device_type == BTN_TOOL_PEN) { 1061 if (features->device_type == BTN_TOOL_PEN) {
1062 input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
1063 features->pressure_fuzz, 0);
1064
1063 /* penabled devices have fixed resolution for each model */ 1065 /* penabled devices have fixed resolution for each model */
1064 input_abs_set_res(input_dev, ABS_X, features->x_resolution); 1066 input_abs_set_res(input_dev, ABS_X, features->x_resolution);
1065 input_abs_set_res(input_dev, ABS_Y, features->y_resolution); 1067 input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
@@ -1098,6 +1100,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1098 __set_bit(BTN_TOOL_MOUSE, input_dev->keybit); 1100 __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
1099 __set_bit(BTN_STYLUS, input_dev->keybit); 1101 __set_bit(BTN_STYLUS, input_dev->keybit);
1100 __set_bit(BTN_STYLUS2, input_dev->keybit); 1102 __set_bit(BTN_STYLUS2, input_dev->keybit);
1103
1104 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1101 break; 1105 break;
1102 1106
1103 case WACOM_21UX2: 1107 case WACOM_21UX2:
@@ -1126,6 +1130,9 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1126 } 1130 }
1127 1131
1128 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1132 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1133
1134 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
1135
1129 wacom_setup_cintiq(wacom_wac); 1136 wacom_setup_cintiq(wacom_wac);
1130 break; 1137 break;
1131 1138
@@ -1150,6 +1157,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1150 /* fall through */ 1157 /* fall through */
1151 1158
1152 case INTUOS: 1159 case INTUOS:
1160 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1161
1153 wacom_setup_intuos(wacom_wac); 1162 wacom_setup_intuos(wacom_wac);
1154 break; 1163 break;
1155 1164
@@ -1165,6 +1174,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1165 1174
1166 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1175 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1167 wacom_setup_intuos(wacom_wac); 1176 wacom_setup_intuos(wacom_wac);
1177
1178 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1168 break; 1179 break;
1169 1180
1170 case TABLETPC2FG: 1181 case TABLETPC2FG:
@@ -1183,26 +1194,40 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
1183 case TABLETPC: 1194 case TABLETPC:
1184 __clear_bit(ABS_MISC, input_dev->absbit); 1195 __clear_bit(ABS_MISC, input_dev->absbit);
1185 1196
1197 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
1198
1186 if (features->device_type != BTN_TOOL_PEN) 1199 if (features->device_type != BTN_TOOL_PEN)
1187 break; /* no need to process stylus stuff */ 1200 break; /* no need to process stylus stuff */
1188 1201
1189 /* fall through */ 1202 /* fall through */
1190 1203
1191 case PL: 1204 case PL:
1192 case PTU:
1193 case DTU: 1205 case DTU:
1194 __set_bit(BTN_TOOL_PEN, input_dev->keybit); 1206 __set_bit(BTN_TOOL_PEN, input_dev->keybit);
1207 __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
1195 __set_bit(BTN_STYLUS, input_dev->keybit); 1208 __set_bit(BTN_STYLUS, input_dev->keybit);
1196 __set_bit(BTN_STYLUS2, input_dev->keybit); 1209 __set_bit(BTN_STYLUS2, input_dev->keybit);
1210
1211 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
1212 break;
1213
1214 case PTU:
1215 __set_bit(BTN_STYLUS2, input_dev->keybit);
1197 /* fall through */ 1216 /* fall through */
1198 1217
1199 case PENPARTNER: 1218 case PENPARTNER:
1219 __set_bit(BTN_TOOL_PEN, input_dev->keybit);
1200 __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); 1220 __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
1221 __set_bit(BTN_STYLUS, input_dev->keybit);
1222
1223 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1201 break; 1224 break;
1202 1225
1203 case BAMBOO_PT: 1226 case BAMBOO_PT:
1204 __clear_bit(ABS_MISC, input_dev->absbit); 1227 __clear_bit(ABS_MISC, input_dev->absbit);
1205 1228
1229 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1230
1206 if (features->device_type == BTN_TOOL_DOUBLETAP) { 1231 if (features->device_type == BTN_TOOL_DOUBLETAP) {
1207 __set_bit(BTN_LEFT, input_dev->keybit); 1232 __set_bit(BTN_LEFT, input_dev->keybit);
1208 __set_bit(BTN_FORWARD, input_dev->keybit); 1233 __set_bit(BTN_FORWARD, input_dev->keybit);
@@ -1460,6 +1485,9 @@ static const struct wacom_features wacom_features_0xD3 =
1460static const struct wacom_features wacom_features_0xD4 = 1485static const struct wacom_features wacom_features_0xD4 =
1461 { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 1486 { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
1462 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1487 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1488static const struct wacom_features wacom_features_0xD5 =
1489 { "Wacom Bamboo Pen 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023,
1490 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1463static const struct wacom_features wacom_features_0xD6 = 1491static const struct wacom_features wacom_features_0xD6 =
1464 { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 1492 { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
1465 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1493 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1564,6 +1592,7 @@ const struct usb_device_id wacom_ids[] = {
1564 { USB_DEVICE_WACOM(0xD2) }, 1592 { USB_DEVICE_WACOM(0xD2) },
1565 { USB_DEVICE_WACOM(0xD3) }, 1593 { USB_DEVICE_WACOM(0xD3) },
1566 { USB_DEVICE_WACOM(0xD4) }, 1594 { USB_DEVICE_WACOM(0xD4) },
1595 { USB_DEVICE_WACOM(0xD5) },
1567 { USB_DEVICE_WACOM(0xD6) }, 1596 { USB_DEVICE_WACOM(0xD6) },
1568 { USB_DEVICE_WACOM(0xD7) }, 1597 { USB_DEVICE_WACOM(0xD7) },
1569 { USB_DEVICE_WACOM(0xD8) }, 1598 { USB_DEVICE_WACOM(0xD8) },
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index ae00604a6a81..f5d66859f232 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -244,6 +244,7 @@ struct mxt_finger {
244 int x; 244 int x;
245 int y; 245 int y;
246 int area; 246 int area;
247 int pressure;
247}; 248};
248 249
249/* Each client has this additional data */ 250/* Each client has this additional data */
@@ -536,6 +537,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id)
536 finger[id].x); 537 finger[id].x);
537 input_report_abs(input_dev, ABS_MT_POSITION_Y, 538 input_report_abs(input_dev, ABS_MT_POSITION_Y,
538 finger[id].y); 539 finger[id].y);
540 input_report_abs(input_dev, ABS_MT_PRESSURE,
541 finger[id].pressure);
539 } else { 542 } else {
540 finger[id].status = 0; 543 finger[id].status = 0;
541 } 544 }
@@ -546,6 +549,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id)
546 if (status != MXT_RELEASE) { 549 if (status != MXT_RELEASE) {
547 input_report_abs(input_dev, ABS_X, finger[single_id].x); 550 input_report_abs(input_dev, ABS_X, finger[single_id].x);
548 input_report_abs(input_dev, ABS_Y, finger[single_id].y); 551 input_report_abs(input_dev, ABS_Y, finger[single_id].y);
552 input_report_abs(input_dev,
553 ABS_PRESSURE, finger[single_id].pressure);
549 } 554 }
550 555
551 input_sync(input_dev); 556 input_sync(input_dev);
@@ -560,6 +565,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
560 int x; 565 int x;
561 int y; 566 int y;
562 int area; 567 int area;
568 int pressure;
563 569
564 /* Check the touch is present on the screen */ 570 /* Check the touch is present on the screen */
565 if (!(status & MXT_DETECT)) { 571 if (!(status & MXT_DETECT)) {
@@ -584,6 +590,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
584 y = y >> 2; 590 y = y >> 2;
585 591
586 area = message->message[4]; 592 area = message->message[4];
593 pressure = message->message[5];
587 594
588 dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id, 595 dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id,
589 status & MXT_MOVE ? "moved" : "pressed", 596 status & MXT_MOVE ? "moved" : "pressed",
@@ -594,6 +601,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
594 finger[id].x = x; 601 finger[id].x = x;
595 finger[id].y = y; 602 finger[id].y = y;
596 finger[id].area = area; 603 finger[id].area = area;
604 finger[id].pressure = pressure;
597 605
598 mxt_input_report(data, id); 606 mxt_input_report(data, id);
599} 607}
@@ -1116,6 +1124,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
1116 0, data->max_x, 0, 0); 1124 0, data->max_x, 0, 0);
1117 input_set_abs_params(input_dev, ABS_Y, 1125 input_set_abs_params(input_dev, ABS_Y,
1118 0, data->max_y, 0, 0); 1126 0, data->max_y, 0, 0);
1127 input_set_abs_params(input_dev, ABS_PRESSURE,
1128 0, 255, 0, 0);
1119 1129
1120 /* For multi touch */ 1130 /* For multi touch */
1121 input_mt_init_slots(input_dev, MXT_MAX_FINGER); 1131 input_mt_init_slots(input_dev, MXT_MAX_FINGER);
@@ -1125,6 +1135,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
1125 0, data->max_x, 0, 0); 1135 0, data->max_x, 0, 0);
1126 input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 1136 input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
1127 0, data->max_y, 0, 0); 1137 0, data->max_y, 0, 0);
1138 input_set_abs_params(input_dev, ABS_MT_PRESSURE,
1139 0, 255, 0, 0);
1128 1140
1129 input_set_drvdata(input_dev, data); 1141 input_set_drvdata(input_dev, data);
1130 i2c_set_clientdata(client, data); 1142 i2c_set_clientdata(client, data);
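
The atmel_mxt_ts hunks above carry a per-finger pressure byte from message[5] through to reporting: a 0-255 range is declared for ABS_PRESSURE and ABS_MT_PRESSURE at probe time, and the stored value is reported next to X/Y on both the single-touch and slotted paths. A driver-agnostic sketch of that declare-then-report flow (hypothetical axis table, not the input core API):

enum axis { AXIS_X, AXIS_Y, AXIS_PRESSURE, AXIS_MAX };

struct axis_range { int min, max; };

static struct axis_range ranges[AXIS_MAX];
static int last_value[AXIS_MAX];

/* Declared once at setup time, in the spirit of input_set_abs_params(). */
static void set_axis_range(enum axis a, int min, int max)
{
	ranges[a].min = min;
	ranges[a].max = max;
}

/* Per-event report, loosely modelled on input_report_abs(); this sketch
 * additionally clamps to the declared range. */
static void report_axis(enum axis a, int value)
{
	if (value < ranges[a].min)
		value = ranges[a].min;
	if (value > ranges[a].max)
		value = ranges[a].max;
	last_value[a] = value;
}

static void report_touch(int x, int y, int pressure)
{
	report_axis(AXIS_X, x);
	report_axis(AXIS_Y, y);
	report_axis(AXIS_PRESSURE, pressure);	/* pressure now travels with X/Y */
}

static void setup_axes(void)
{
	set_axis_range(AXIS_X, 0, 1023);	/* hypothetical X/Y ranges */
	set_axis_range(AXIS_Y, 0, 1023);
	set_axis_range(AXIS_PRESSURE, 0, 255);	/* mirrors the new 0-255 pressure axis */
}
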
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index 4f2713d92791..4627fe55b401 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -9,7 +9,8 @@
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License. 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
13 */ 14 */
14 15
15/* 16/*
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 089b0a0f3d8c..0e8f63e5b36f 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -13,6 +13,7 @@
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#include <linux/module.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include <linux/errno.h> 19#include <linux/errno.h>
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index c14412ef4648..9941d39df43d 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -383,6 +383,8 @@ static int w8001_setup(struct w8001 *w8001)
383 dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); 383 dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
384 strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name)); 384 strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name));
385 385
386 __set_bit(INPUT_PROP_DIRECT, dev->propbit);
387
386 /* penabled? */ 388 /* penabled? */
387 error = w8001_command(w8001, W8001_CMD_QUERY, true); 389 error = w8001_command(w8001, W8001_CMD_QUERY, true);
388 if (!error) { 390 if (!error) {
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a14f8dc23462..0e4227f457af 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd)
605 * Writes the command to the IOMMUs command buffer and informs the 605 * Writes the command to the IOMMUs command buffer and informs the
606 * hardware about the new command. 606 * hardware about the new command.
607 */ 607 */
608static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) 608static int iommu_queue_command_sync(struct amd_iommu *iommu,
609 struct iommu_cmd *cmd,
610 bool sync)
609{ 611{
610 u32 left, tail, head, next_tail; 612 u32 left, tail, head, next_tail;
611 unsigned long flags; 613 unsigned long flags;
@@ -639,13 +641,18 @@ again:
639 copy_cmd_to_buffer(iommu, cmd, tail); 641 copy_cmd_to_buffer(iommu, cmd, tail);
640 642
641 /* We need to sync now to make sure all commands are processed */ 643 /* We need to sync now to make sure all commands are processed */
642 iommu->need_sync = true; 644 iommu->need_sync = sync;
643 645
644 spin_unlock_irqrestore(&iommu->lock, flags); 646 spin_unlock_irqrestore(&iommu->lock, flags);
645 647
646 return 0; 648 return 0;
647} 649}
648 650
651static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
652{
653 return iommu_queue_command_sync(iommu, cmd, true);
654}
655
649/* 656/*
650 * This function queues a completion wait command into the command 657 * This function queues a completion wait command into the command
651 * buffer of an IOMMU 658 * buffer of an IOMMU
@@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
661 668
662 build_completion_wait(&cmd, (u64)&sem); 669 build_completion_wait(&cmd, (u64)&sem);
663 670
664 ret = iommu_queue_command(iommu, &cmd); 671 ret = iommu_queue_command_sync(iommu, &cmd, false);
665 if (ret) 672 if (ret)
666 return ret; 673 return ret;
667 674
@@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain)
840static void domain_flush_devices(struct protection_domain *domain) 847static void domain_flush_devices(struct protection_domain *domain)
841{ 848{
842 struct iommu_dev_data *dev_data; 849 struct iommu_dev_data *dev_data;
843 unsigned long flags;
844
845 spin_lock_irqsave(&domain->lock, flags);
846 850
847 list_for_each_entry(dev_data, &domain->dev_list, list) 851 list_for_each_entry(dev_data, &domain->dev_list, list)
848 device_flush_dte(dev_data); 852 device_flush_dte(dev_data);
849
850 spin_unlock_irqrestore(&domain->lock, flags);
851} 853}
852 854
853/**************************************************************************** 855/****************************************************************************
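The amd_iommu refactor above turns the hard-coded need_sync = true into a parameter and keeps the old entry point as a one-line wrapper, so the completion-wait path can queue its command without flagging yet another sync. The same pattern in a self-contained C sketch (all names hypothetical):

#include <stdbool.h>

struct cmd_queue {
	bool need_sync;
	/* ... ring buffer state ... */
};

/* New primitive: the caller decides whether this command owes a sync. */
static int queue_command_sync(struct cmd_queue *q, int cmd, bool sync)
{
	/* copy cmd into the ring buffer here */
	q->need_sync = sync;
	return 0;
}

/* The old entry point survives as a thin wrapper, so existing callers keep
 * their behaviour; only the completion-wait path passes sync = false. */
static int queue_command(struct cmd_queue *q, int cmd)
{
	return queue_command_sync(q, cmd, true);
}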
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3dc9befa5aec..6dcc7e2d54de 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
1388 return ret; 1388 return ret;
1389 } 1389 }
1390 1390
1391 ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu); 1391 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1392 if (ret) 1392 if (ret)
1393 printk(KERN_ERR "IOMMU: can't request irq\n"); 1393 printk(KERN_ERR "IOMMU: can't request irq\n");
1394 return ret; 1394 return ret;
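The dmar.c change requests the fault interrupt with IRQF_NO_THREAD so it is never force-threaded (for example when booting with threadirqs), keeping fault reporting in hard-irq context. A hedged sketch of the call; request_irq() and the flag are the real API, while the handler and cookie names are placeholders:

#include <linux/interrupt.h>

static irqreturn_t example_fault_handler(int irq, void *dev_id)
{
	/* acknowledge the fault and log it */
	return IRQ_HANDLED;
}

static int example_setup_fault_irq(unsigned int irq, void *cookie)
{
	/* IRQF_NO_THREAD: keep this handler out of the forced-threading
	 * machinery so faults are reported directly from hard-irq context. */
	return request_irq(irq, example_fault_handler, IRQF_NO_THREAD,
			   "example-fault", cookie);
}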
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
index b9826032450b..8c00937bf7e7 100644
--- a/drivers/leds/leds-ams-delta.c
+++ b/drivers/leds/leds-ams-delta.c
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/module.h>
11#include <linux/kernel.h> 12#include <linux/kernel.h>
12#include <linux/init.h> 13#include <linux/init.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 3ebe3824662d..ea2185531f82 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -662,6 +662,11 @@ failed_unregister_led1_R:
662static void bd2802_unregister_led_classdev(struct bd2802_led *led) 662static void bd2802_unregister_led_classdev(struct bd2802_led *led)
663{ 663{
664 cancel_work_sync(&led->work); 664 cancel_work_sync(&led->work);
665 led_classdev_unregister(&led->cdev_led2b);
666 led_classdev_unregister(&led->cdev_led2g);
667 led_classdev_unregister(&led->cdev_led2r);
668 led_classdev_unregister(&led->cdev_led1b);
669 led_classdev_unregister(&led->cdev_led1g);
665 led_classdev_unregister(&led->cdev_led1r); 670 led_classdev_unregister(&led->cdev_led1r);
666} 671}
667 672
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index e4ce1fd46338..bcfbd3a60eab 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index d87c9d02f786..328c64c0841c 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -41,6 +41,7 @@ static ssize_t led_delay_on_store(struct device *dev,
41 41
42 if (count == size) { 42 if (count == size) {
43 led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off); 43 led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
44 led_cdev->blink_delay_on = state;
44 ret = count; 45 ret = count;
45 } 46 }
46 47
@@ -69,6 +70,7 @@ static ssize_t led_delay_off_store(struct device *dev,
69 70
70 if (count == size) { 71 if (count == size) {
71 led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state); 72 led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
73 led_cdev->blink_delay_off = state;
72 ret = count; 74 ret = count;
73 } 75 }
74 76
diff --git a/drivers/md/linear.h b/drivers/md/linear.h
index 0ce29b61605a..2f2da05b2ce9 100644
--- a/drivers/md/linear.h
+++ b/drivers/md/linear.h
@@ -10,9 +10,9 @@ typedef struct dev_info dev_info_t;
10 10
11struct linear_private_data 11struct linear_private_data
12{ 12{
13 struct rcu_head rcu;
13 sector_t array_sectors; 14 sector_t array_sectors;
14 dev_info_t disks[0]; 15 dev_info_t disks[0];
15 struct rcu_head rcu;
16}; 16};
17 17
18 18
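The linear.h reordering matters because disks[0] is a zero-length (flexible) array member: its elements are laid out past the end of the declared structure, so it has to be the last field, and anything placed after it, such as the rcu_head, would be overwritten by the array. A standalone C illustration of the allocation pattern (not from the driver; standard C spells the member entry[] rather than entry[0]):

#include <stdlib.h>

struct table {
	int nr;
	long cookie;	/* ordinary members come first ...            */
	int entry[];	/* ... the flexible array member must be last */
};

static struct table *table_alloc(int nr)
{
	/* the entries live in the extra space past sizeof(struct table) */
	struct table *t = malloc(sizeof(*t) + nr * sizeof(t->entry[0]));

	if (t)
		t->nr = nr;
	return t;
}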
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8e221a20f5d9..5404b2295820 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -848,7 +848,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
848 bio->bi_end_io = super_written; 848 bio->bi_end_io = super_written;
849 849
850 atomic_inc(&mddev->pending_writes); 850 atomic_inc(&mddev->pending_writes);
851 submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio); 851 submit_bio(WRITE_FLUSH_FUA, bio);
852} 852}
853 853
854void md_super_wait(mddev_t *mddev) 854void md_super_wait(mddev_t *mddev)
@@ -1138,8 +1138,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
1138 ret = 0; 1138 ret = 0;
1139 } 1139 }
1140 rdev->sectors = rdev->sb_start; 1140 rdev->sectors = rdev->sb_start;
1141 /* Limit to 4TB as metadata cannot record more than that */
1142 if (rdev->sectors >= (2ULL << 32))
1143 rdev->sectors = (2ULL << 32) - 2;
1141 1144
1142 if (rdev->sectors < sb->size * 2 && sb->level > 1) 1145 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1143 /* "this cannot possibly happen" ... */ 1146 /* "this cannot possibly happen" ... */
1144 ret = -EINVAL; 1147 ret = -EINVAL;
1145 1148
@@ -1173,7 +1176,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1173 mddev->clevel[0] = 0; 1176 mddev->clevel[0] = 0;
1174 mddev->layout = sb->layout; 1177 mddev->layout = sb->layout;
1175 mddev->raid_disks = sb->raid_disks; 1178 mddev->raid_disks = sb->raid_disks;
1176 mddev->dev_sectors = sb->size * 2; 1179 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1177 mddev->events = ev1; 1180 mddev->events = ev1;
1178 mddev->bitmap_info.offset = 0; 1181 mddev->bitmap_info.offset = 0;
1179 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 1182 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
@@ -1415,6 +1418,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1415 rdev->sb_start = calc_dev_sboffset(rdev); 1418 rdev->sb_start = calc_dev_sboffset(rdev);
1416 if (!num_sectors || num_sectors > rdev->sb_start) 1419 if (!num_sectors || num_sectors > rdev->sb_start)
1417 num_sectors = rdev->sb_start; 1420 num_sectors = rdev->sb_start;
1421 /* Limit to 4TB as metadata cannot record more than that.
1422 * 4TB == 2^32 KB, or 2*2^32 sectors.
1423 */
1424 if (num_sectors >= (2ULL << 32))
1425 num_sectors = (2ULL << 32) - 2;
1418 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1426 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1419 rdev->sb_page); 1427 rdev->sb_page);
1420 md_super_wait(rdev->mddev); 1428 md_super_wait(rdev->mddev);
@@ -1738,6 +1746,11 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1738 sb->level = cpu_to_le32(mddev->level); 1746 sb->level = cpu_to_le32(mddev->level);
1739 sb->layout = cpu_to_le32(mddev->layout); 1747 sb->layout = cpu_to_le32(mddev->layout);
1740 1748
1749 if (test_bit(WriteMostly, &rdev->flags))
1750 sb->devflags |= WriteMostly1;
1751 else
1752 sb->devflags &= ~WriteMostly1;
1753
1741 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 1754 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1742 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); 1755 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1743 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1756 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
@@ -2561,7 +2574,10 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2561 int err = -EINVAL; 2574 int err = -EINVAL;
2562 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2575 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2563 md_error(rdev->mddev, rdev); 2576 md_error(rdev->mddev, rdev);
2564 err = 0; 2577 if (test_bit(Faulty, &rdev->flags))
2578 err = 0;
2579 else
2580 err = -EBUSY;
2565 } else if (cmd_match(buf, "remove")) { 2581 } else if (cmd_match(buf, "remove")) {
2566 if (rdev->raid_disk >= 0) 2582 if (rdev->raid_disk >= 0)
2567 err = -EBUSY; 2583 err = -EBUSY;
@@ -2584,7 +2600,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2584 err = 0; 2600 err = 0;
2585 } else if (cmd_match(buf, "-blocked")) { 2601 } else if (cmd_match(buf, "-blocked")) {
2586 if (!test_bit(Faulty, &rdev->flags) && 2602 if (!test_bit(Faulty, &rdev->flags) &&
2587 test_bit(BlockedBadBlocks, &rdev->flags)) { 2603 rdev->badblocks.unacked_exist) {
2588 /* metadata handler doesn't understand badblocks, 2604 /* metadata handler doesn't understand badblocks,
2589 * so we need to fail the device 2605 * so we need to fail the device
2590 */ 2606 */
@@ -5983,6 +5999,8 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
5983 return -ENODEV; 5999 return -ENODEV;
5984 6000
5985 md_error(mddev, rdev); 6001 md_error(mddev, rdev);
6002 if (!test_bit(Faulty, &rdev->flags))
6003 return -EBUSY;
5986 return 0; 6004 return 0;
5987} 6005}
5988 6006
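Several of the md hunks above enforce the same limit: the 0.90 superblock stores the per-device size as a 32-bit count of 1 KiB blocks, so the largest size it can record is 2^32 KiB = 4 TiB, i.e. 2 * 2^32 sectors of 512 bytes, which is exactly the (2ULL << 32) bound being applied (minus a little rounding slack). A quick standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_sectors = 2ULL << 32;		/* 2 * 2^32 sectors */
	uint64_t max_bytes = max_sectors * 512;		/* 512 B per sector */

	/* prints "4096 GiB", i.e. 4 TiB */
	printf("%llu GiB\n", (unsigned long long)(max_bytes >> 30));
	return 0;
}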
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 32323f0afd89..f4622dd8fc59 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1099,12 +1099,11 @@ read_again:
1099 bio_list_add(&conf->pending_bio_list, mbio); 1099 bio_list_add(&conf->pending_bio_list, mbio);
1100 spin_unlock_irqrestore(&conf->device_lock, flags); 1100 spin_unlock_irqrestore(&conf->device_lock, flags);
1101 } 1101 }
1102 r1_bio_write_done(r1_bio); 1102 /* Mustn't call r1_bio_write_done before this next test,
1103 1103 * as it could result in the bio being freed.
1104 /* In case raid1d snuck in to freeze_array */ 1104 */
1105 wake_up(&conf->wait_barrier);
1106
1107 if (sectors_handled < (bio->bi_size >> 9)) { 1105 if (sectors_handled < (bio->bi_size >> 9)) {
1106 r1_bio_write_done(r1_bio);
1108 /* We need another r1_bio. It has already been counted 1107 /* We need another r1_bio. It has already been counted
1109 * in bio->bi_phys_segments 1108 * in bio->bi_phys_segments
1110 */ 1109 */
@@ -1117,6 +1116,11 @@ read_again:
1117 goto retry_write; 1116 goto retry_write;
1118 } 1117 }
1119 1118
1119 r1_bio_write_done(r1_bio);
1120
1121 /* In case raid1d snuck in to freeze_array */
1122 wake_up(&conf->wait_barrier);
1123
1120 if (do_sync || !bitmap || !plugged) 1124 if (do_sync || !bitmap || !plugged)
1121 md_wakeup_thread(mddev->thread); 1125 md_wakeup_thread(mddev->thread);
1122 1126
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8b29cd4f01c8..d7a8468ddeab 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)
337 md_write_end(r10_bio->mddev); 337 md_write_end(r10_bio->mddev);
338} 338}
339 339
340static void one_write_done(r10bio_t *r10_bio)
341{
342 if (atomic_dec_and_test(&r10_bio->remaining)) {
343 if (test_bit(R10BIO_WriteError, &r10_bio->state))
344 reschedule_retry(r10_bio);
345 else {
346 close_write(r10_bio);
347 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
348 reschedule_retry(r10_bio);
349 else
350 raid_end_bio_io(r10_bio);
351 }
352 }
353}
354
340static void raid10_end_write_request(struct bio *bio, int error) 355static void raid10_end_write_request(struct bio *bio, int error)
341{ 356{
342 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 357 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
387 * Let's see if all mirrored write operations have finished 402 * Let's see if all mirrored write operations have finished
388 * already. 403 * already.
389 */ 404 */
390 if (atomic_dec_and_test(&r10_bio->remaining)) { 405 one_write_done(r10_bio);
391 if (test_bit(R10BIO_WriteError, &r10_bio->state))
392 reschedule_retry(r10_bio);
393 else {
394 close_write(r10_bio);
395 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
396 reschedule_retry(r10_bio);
397 else
398 raid_end_bio_io(r10_bio);
399 }
400 }
401 if (dec_rdev) 406 if (dec_rdev)
402 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); 407 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
403} 408}
@@ -1127,20 +1132,12 @@ retry_write:
1127 spin_unlock_irqrestore(&conf->device_lock, flags); 1132 spin_unlock_irqrestore(&conf->device_lock, flags);
1128 } 1133 }
1129 1134
1130 if (atomic_dec_and_test(&r10_bio->remaining)) { 1135 /* Don't remove the bias on 'remaining' (one_write_done) until
1131 /* This matches the end of raid10_end_write_request() */ 1136 * after checking if we need to go around again.
1132 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, 1137 */
1133 r10_bio->sectors,
1134 !test_bit(R10BIO_Degraded, &r10_bio->state),
1135 0);
1136 md_write_end(mddev);
1137 raid_end_bio_io(r10_bio);
1138 }
1139
1140 /* In case raid10d snuck in to freeze_array */
1141 wake_up(&conf->wait_barrier);
1142 1138
1143 if (sectors_handled < (bio->bi_size >> 9)) { 1139 if (sectors_handled < (bio->bi_size >> 9)) {
1140 one_write_done(r10_bio);
1144 /* We need another r10_bio. It has already been counted 1141 /* We need another r10_bio. It has already been counted
1145 * in bio->bi_phys_segments. 1142 * in bio->bi_phys_segments.
1146 */ 1143 */
@@ -1154,6 +1151,10 @@ retry_write:
1154 r10_bio->state = 0; 1151 r10_bio->state = 0;
1155 goto retry_write; 1152 goto retry_write;
1156 } 1153 }
1154 one_write_done(r10_bio);
1155
1156 /* In case raid10d snuck in to freeze_array */
1157 wake_up(&conf->wait_barrier);
1157 1158
1158 if (do_sync || !mddev->bitmap || !plugged) 1159 if (do_sync || !mddev->bitmap || !plugged)
1159 md_wakeup_thread(mddev->thread); 1160 md_wakeup_thread(mddev->thread);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dbae459fb02d..43709fa6b6df 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3336,7 +3336,7 @@ static void handle_stripe(struct stripe_head *sh)
3336 3336
3337finish: 3337finish:
3338 /* wait for this device to become unblocked */ 3338 /* wait for this device to become unblocked */
3339 if (unlikely(s.blocked_rdev)) 3339 if (conf->mddev->external && unlikely(s.blocked_rdev))
3340 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); 3340 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
3341 3341
3342 if (s.handle_bad_blocks) 3342 if (s.handle_bad_blocks)
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index 3db89e3cb0bb..536c16c943bd 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -224,26 +224,8 @@ static struct dvb_usb_device_properties vp7045_properties;
224static int vp7045_usb_probe(struct usb_interface *intf, 224static int vp7045_usb_probe(struct usb_interface *intf,
225 const struct usb_device_id *id) 225 const struct usb_device_id *id)
226{ 226{
227 struct dvb_usb_device *d; 227 return dvb_usb_device_init(intf, &vp7045_properties,
228 int ret = dvb_usb_device_init(intf, &vp7045_properties, 228 THIS_MODULE, NULL, adapter_nr);
229 THIS_MODULE, &d, adapter_nr);
230 if (ret)
231 return ret;
232
233 d->priv = kmalloc(20, GFP_KERNEL);
234 if (!d->priv) {
235 dvb_usb_device_exit(intf);
236 return -ENOMEM;
237 }
238
239 return ret;
240}
241
242static void vp7045_usb_disconnect(struct usb_interface *intf)
243{
244 struct dvb_usb_device *d = usb_get_intfdata(intf);
245 kfree(d->priv);
246 dvb_usb_device_exit(intf);
247} 229}
248 230
249static struct usb_device_id vp7045_usb_table [] = { 231static struct usb_device_id vp7045_usb_table [] = {
@@ -258,7 +240,7 @@ MODULE_DEVICE_TABLE(usb, vp7045_usb_table);
258static struct dvb_usb_device_properties vp7045_properties = { 240static struct dvb_usb_device_properties vp7045_properties = {
259 .usb_ctrl = CYPRESS_FX2, 241 .usb_ctrl = CYPRESS_FX2,
260 .firmware = "dvb-usb-vp7045-01.fw", 242 .firmware = "dvb-usb-vp7045-01.fw",
261 .size_of_priv = sizeof(u8 *), 243 .size_of_priv = 20,
262 244
263 .num_adapters = 1, 245 .num_adapters = 1,
264 .adapter = { 246 .adapter = {
@@ -305,7 +287,7 @@ static struct dvb_usb_device_properties vp7045_properties = {
305static struct usb_driver vp7045_usb_driver = { 287static struct usb_driver vp7045_usb_driver = {
306 .name = "dvb_usb_vp7045", 288 .name = "dvb_usb_vp7045",
307 .probe = vp7045_usb_probe, 289 .probe = vp7045_usb_probe,
308 .disconnect = vp7045_usb_disconnect, 290 .disconnect = dvb_usb_device_exit,
309 .id_table = vp7045_usb_table, 291 .id_table = vp7045_usb_table,
310}; 292};
311 293
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index eae05b500476..144f3f55d765 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -618,7 +618,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt)
618static void nvt_process_rx_ir_data(struct nvt_dev *nvt) 618static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
619{ 619{
620 DEFINE_IR_RAW_EVENT(rawir); 620 DEFINE_IR_RAW_EVENT(rawir);
621 unsigned int count;
622 u32 carrier; 621 u32 carrier;
623 u8 sample; 622 u8 sample;
624 int i; 623 int i;
@@ -631,65 +630,38 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
631 if (nvt->carrier_detect_enabled) 630 if (nvt->carrier_detect_enabled)
632 carrier = nvt_rx_carrier_detect(nvt); 631 carrier = nvt_rx_carrier_detect(nvt);
633 632
634 count = nvt->pkts; 633 nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
635 nvt_dbg_verbose("Processing buffer of len %d", count);
636 634
637 init_ir_raw_event(&rawir); 635 init_ir_raw_event(&rawir);
638 636
639 for (i = 0; i < count; i++) { 637 for (i = 0; i < nvt->pkts; i++) {
640 nvt->pkts--;
641 sample = nvt->buf[i]; 638 sample = nvt->buf[i];
642 639
643 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); 640 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
644 rawir.duration = US_TO_NS((sample & BUF_LEN_MASK) 641 rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
645 * SAMPLE_PERIOD); 642 * SAMPLE_PERIOD);
646 643
647 if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) { 644 nvt_dbg("Storing %s with duration %d",
648 if (nvt->rawir.pulse == rawir.pulse) 645 rawir.pulse ? "pulse" : "space", rawir.duration);
649 nvt->rawir.duration += rawir.duration;
650 else {
651 nvt->rawir.duration = rawir.duration;
652 nvt->rawir.pulse = rawir.pulse;
653 }
654 continue;
655 }
656
657 rawir.duration += nvt->rawir.duration;
658 646
659 init_ir_raw_event(&nvt->rawir); 647 ir_raw_event_store_with_filter(nvt->rdev, &rawir);
660 nvt->rawir.duration = 0;
661 nvt->rawir.pulse = rawir.pulse;
662
663 if (sample == BUF_PULSE_BIT)
664 rawir.pulse = false;
665
666 if (rawir.duration) {
667 nvt_dbg("Storing %s with duration %d",
668 rawir.pulse ? "pulse" : "space",
669 rawir.duration);
670
671 ir_raw_event_store_with_filter(nvt->rdev, &rawir);
672 }
673 648
674 /* 649 /*
675 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE 650 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
676 * indicates end of IR signal, but new data incoming. In both 651 * indicates end of IR signal, but new data incoming. In both
677 * cases, it means we're ready to call ir_raw_event_handle 652 * cases, it means we're ready to call ir_raw_event_handle
678 */ 653 */
679 if ((sample == BUF_PULSE_BIT) && nvt->pkts) { 654 if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
680 nvt_dbg("Calling ir_raw_event_handle (signal end)\n"); 655 nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
681 ir_raw_event_handle(nvt->rdev); 656 ir_raw_event_handle(nvt->rdev);
682 } 657 }
683 } 658 }
684 659
660 nvt->pkts = 0;
661
685 nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n"); 662 nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
686 ir_raw_event_handle(nvt->rdev); 663 ir_raw_event_handle(nvt->rdev);
687 664
688 if (nvt->pkts) {
689 nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
690 nvt->pkts = 0;
691 }
692
693 nvt_dbg_verbose("%s done", __func__); 665 nvt_dbg_verbose("%s done", __func__);
694} 666}
695 667
@@ -1048,7 +1020,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
1048 1020
1049 spin_lock_init(&nvt->nvt_lock); 1021 spin_lock_init(&nvt->nvt_lock);
1050 spin_lock_init(&nvt->tx.lock); 1022 spin_lock_init(&nvt->tx.lock);
1051 init_ir_raw_event(&nvt->rawir);
1052 1023
1053 ret = -EBUSY; 1024 ret = -EBUSY;
1054 /* now claim resources */ 1025 /* now claim resources */
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 1241fc89a36c..0d5e0872a2ea 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -67,7 +67,6 @@ static int debug;
67struct nvt_dev { 67struct nvt_dev {
68 struct pnp_dev *pdev; 68 struct pnp_dev *pdev;
69 struct rc_dev *rdev; 69 struct rc_dev *rdev;
70 struct ir_raw_event rawir;
71 70
72 spinlock_t nvt_lock; 71 spinlock_t nvt_lock;
73 72
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 0800433b2092..18305c89083c 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -2858,7 +2858,6 @@ static void ov7xx0_configure(struct sd *sd)
2858 case 0x60: 2858 case 0x60:
2859 PDEBUG(D_PROBE, "Sensor is a OV7660"); 2859 PDEBUG(D_PROBE, "Sensor is a OV7660");
2860 sd->sensor = SEN_OV7660; 2860 sd->sensor = SEN_OV7660;
2861 sd->invert_led = 0;
2862 break; 2861 break;
2863 default: 2862 default:
2864 PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low); 2863 PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low);
@@ -3337,7 +3336,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
3337 case BRIDGE_OV519: 3336 case BRIDGE_OV519:
3338 cam->cam_mode = ov519_vga_mode; 3337 cam->cam_mode = ov519_vga_mode;
3339 cam->nmodes = ARRAY_SIZE(ov519_vga_mode); 3338 cam->nmodes = ARRAY_SIZE(ov519_vga_mode);
3340 sd->invert_led = !sd->invert_led;
3341 break; 3339 break;
3342 case BRIDGE_OVFX2: 3340 case BRIDGE_OVFX2:
3343 cam->cam_mode = ov519_vga_mode; 3341 cam->cam_mode = ov519_vga_mode;
@@ -5005,24 +5003,24 @@ static const struct sd_desc sd_desc = {
5005/* -- module initialisation -- */ 5003/* -- module initialisation -- */
5006static const struct usb_device_id device_table[] = { 5004static const struct usb_device_id device_table[] = {
5007 {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF }, 5005 {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
5008 {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 }, 5006 {USB_DEVICE(0x041e, 0x4052),
5009 {USB_DEVICE(0x041e, 0x405f),
5010 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, 5007 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
5008 {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
5011 {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 }, 5009 {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
5012 {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, 5010 {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
5013 {USB_DEVICE(0x041e, 0x4064), 5011 {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },
5014 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
5015 {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 }, 5012 {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 },
5016 {USB_DEVICE(0x041e, 0x4068), 5013 {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 },
5014 {USB_DEVICE(0x045e, 0x028c),
5017 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, 5015 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
5018 {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
5019 {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 }, 5016 {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
5020 {USB_DEVICE(0x054c, 0x0155), 5017 {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
5021 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
5022 {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 }, 5018 {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },
5023 {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 }, 5019 {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
5024 {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 }, 5020 {USB_DEVICE(0x05a9, 0x0519),
5025 {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 }, 5021 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
5022 {USB_DEVICE(0x05a9, 0x0530),
5023 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
5026 {USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 }, 5024 {USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 },
5027 {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 }, 5025 {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
5028 {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 }, 5026 {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 81b8a600783b..c477ad11f103 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -2386,7 +2386,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
2386 reg_w1(gspca_dev, 0x01, 0x22); 2386 reg_w1(gspca_dev, 0x01, 0x22);
2387 msleep(100); 2387 msleep(100);
2388 reg01 = SCL_SEL_OD | S_PDN_INV; 2388 reg01 = SCL_SEL_OD | S_PDN_INV;
2389 reg17 &= MCK_SIZE_MASK; 2389 reg17 &= ~MCK_SIZE_MASK;
2390 reg17 |= 0x04; /* clock / 4 */ 2390 reg17 |= 0x04; /* clock / 4 */
2391 break; 2391 break;
2392 } 2392 }
@@ -2532,6 +2532,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
2532 if (!mode) { /* if 640x480 */ 2532 if (!mode) { /* if 640x480 */
2533 reg17 &= ~MCK_SIZE_MASK; 2533 reg17 &= ~MCK_SIZE_MASK;
2534 reg17 |= 0x04; /* clock / 4 */ 2534 reg17 |= 0x04; /* clock / 4 */
2535 } else {
2536 reg01 &= ~SYS_SEL_48M; /* clk 24 MHz */
2537 reg17 &= ~MCK_SIZE_MASK;
2538 reg17 |= 0x02; /* clock / 2 */
2535 } 2539 }
2536 break; 2540 break;
2537 case SENSOR_OV7630: 2541 case SENSOR_OV7630:
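Both sonixj hunks fix the same read-modify-write slip: to change a multi-bit register field you clear it with the inverted mask and then OR in the new value; reg &= MASK does the opposite, keeping the old field and wiping the other bits. A generic illustration (the mask width chosen here is arbitrary):

#include <stdint.h>

#define CLOCK_DIV_MASK 0x1f	/* example multi-bit divider field */

static uint8_t set_clock_div(uint8_t reg, uint8_t div)
{
	/* reg &= CLOCK_DIV_MASK would keep the stale divider and clear
	 * everything else; this is the intended clear-then-set sequence. */
	reg &= ~CLOCK_DIV_MASK;
	reg |= div & CLOCK_DIV_MASK;
	return reg;
}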
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index b5ef36222440..b3a5ecdb33ac 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev)
2194 "'%s' Display already enabled\n", 2194 "'%s' Display already enabled\n",
2195 def_display->name); 2195 def_display->name);
2196 } 2196 }
2197 /* set the update mode */
2198 if (def_display->caps &
2199 OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
2200 if (dssdrv->enable_te)
2201 dssdrv->enable_te(def_display, 0);
2202 if (dssdrv->set_update_mode)
2203 dssdrv->set_update_mode(def_display,
2204 OMAP_DSS_UPDATE_MANUAL);
2205 } else {
2206 if (dssdrv->set_update_mode)
2207 dssdrv->set_update_mode(def_display,
2208 OMAP_DSS_UPDATE_AUTO);
2209 }
2210 } 2197 }
2211 } 2198 }
2212 2199
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index 9d3459de04b2..80796eb0c53e 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -31,6 +31,7 @@
31#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
32#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/slab.h>
34#include <media/v4l2-event.h> 35#include <media/v4l2-event.h>
35 36
36#include "isp.h" 37#include "isp.h"
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index e9a0e94b9995..8c70e64444e7 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -338,7 +338,7 @@ int pwc_init_controls(struct pwc_device *pdev)
338 if (pdev->restore_factory) 338 if (pdev->restore_factory)
339 pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE; 339 pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE;
340 340
341 if (!pdev->features & FEATURE_MOTOR_PANTILT) 341 if (!(pdev->features & FEATURE_MOTOR_PANTILT))
342 return hdl->error; 342 return hdl->error;
343 343
344 /* Motor pan / tilt / reset */ 344 /* Motor pan / tilt / reset */
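The pwc one-liner is the classic precedence trap: ! binds tighter than &, so !pdev->features & FEATURE_MOTOR_PANTILT is parsed as (!pdev->features) & FEATURE_MOTOR_PANTILT and almost always evaluates to 0. A minimal demonstration:

#include <stdio.h>

#define FEATURE_MOTOR_PANTILT 0x04

int main(void)
{
	unsigned int features = 0x01;	/* some feature set, but no pan/tilt */

	/* buggy: (!features) is 0, so the test never fires */
	printf("buggy:   %d\n", !features & FEATURE_MOTOR_PANTILT);   /* 0 */

	/* intended: pan/tilt really is absent, so this yields 1 */
	printf("correct: %d\n", !(features & FEATURE_MOTOR_PANTILT)); /* 1 */
	return 0;
}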
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index d29f9c2d0854..e4100b1f68df 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
1961 1961
1962 list_for_each_entry(stream, &dev->streams, list) { 1962 list_for_each_entry(stream, &dev->streams, list) {
1963 if (stream->intf == intf) 1963 if (stream->intf == intf)
1964 return uvc_video_resume(stream); 1964 return uvc_video_resume(stream, reset);
1965 } 1965 }
1966 1966
1967 uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " 1967 uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface "
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
index 48fea373c25a..29e239911d0e 100644
--- a/drivers/media/video/uvc/uvc_entity.c
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
49 if (remote == NULL) 49 if (remote == NULL)
50 return -EINVAL; 50 return -EINVAL;
51 51
52 source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING) 52 source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
53 ? (remote->vdev ? &remote->vdev->entity : NULL) 53 ? (remote->vdev ? &remote->vdev->entity : NULL)
54 : &remote->subdev.entity; 54 : &remote->subdev.entity;
55 if (source == NULL) 55 if (source == NULL)
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 8244167c8915..ffd1158628b6 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream)
1104 * buffers, making sure userspace applications are notified of the problem 1104 * buffers, making sure userspace applications are notified of the problem
1105 * instead of waiting forever. 1105 * instead of waiting forever.
1106 */ 1106 */
1107int uvc_video_resume(struct uvc_streaming *stream) 1107int uvc_video_resume(struct uvc_streaming *stream, int reset)
1108{ 1108{
1109 int ret; 1109 int ret;
1110 1110
1111 /* If the bus has been reset on resume, set the alternate setting to 0.
1112 * This should be the default value, but some devices crash or otherwise
1113 * misbehave if they don't receive a SET_INTERFACE request before any
1114 * other video control request.
1115 */
1116 if (reset)
1117 usb_set_interface(stream->dev->udev, stream->intfnum, 0);
1118
1111 stream->frozen = 0; 1119 stream->frozen = 0;
1112 1120
1113 ret = uvc_commit_video(stream, &stream->ctrl); 1121 ret = uvc_commit_video(stream, &stream->ctrl);
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index df32a43ca86a..cbdd49bf8b67 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);
638/* Video */ 638/* Video */
639extern int uvc_video_init(struct uvc_streaming *stream); 639extern int uvc_video_init(struct uvc_streaming *stream);
640extern int uvc_video_suspend(struct uvc_streaming *stream); 640extern int uvc_video_suspend(struct uvc_streaming *stream);
641extern int uvc_video_resume(struct uvc_streaming *stream); 641extern int uvc_video_resume(struct uvc_streaming *stream, int reset);
642extern int uvc_video_enable(struct uvc_streaming *stream, int enable); 642extern int uvc_video_enable(struct uvc_streaming *stream, int enable);
643extern int uvc_probe_video(struct uvc_streaming *stream, 643extern int uvc_probe_video(struct uvc_streaming *stream,
644 struct uvc_streaming_control *probe); 644 struct uvc_streaming_control *probe);
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 06f14008b346..d72156517726 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd)
173 media_device_unregister_entity(&vdev->entity); 173 media_device_unregister_entity(&vdev->entity);
174#endif 174#endif
175 175
176 /* Do not call v4l2_device_put if there is no release callback set.
177 * Drivers that have no v4l2_device release callback might free the
178 * v4l2_dev instance in the video_device release callback below, so we
179 * must perform this check here.
180 *
181 * TODO: In the long run all drivers that use v4l2_device should use the
182 * v4l2_device release callback. This check will then be unnecessary.
183 */
184 if (v4l2_dev->release == NULL)
185 v4l2_dev = NULL;
186
176 /* Release video_device and perform other 187 /* Release video_device and perform other
177 cleanups as needed. */ 188 cleanups as needed. */
178 vdev->release(vdev); 189 vdev->release(vdev);
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index c72856c41434..e6a2c3b302d4 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
38 mutex_init(&v4l2_dev->ioctl_lock); 38 mutex_init(&v4l2_dev->ioctl_lock);
39 v4l2_prio_init(&v4l2_dev->prio); 39 v4l2_prio_init(&v4l2_dev->prio);
40 kref_init(&v4l2_dev->ref); 40 kref_init(&v4l2_dev->ref);
41 get_device(dev);
41 v4l2_dev->dev = dev; 42 v4l2_dev->dev = dev;
42 if (dev == NULL) { 43 if (dev == NULL) {
43 /* If dev == NULL, then name must be filled in by the caller */ 44 /* If dev == NULL, then name must be filled in by the caller */
@@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
93 94
94 if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) 95 if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)
95 dev_set_drvdata(v4l2_dev->dev, NULL); 96 dev_set_drvdata(v4l2_dev->dev, NULL);
97 put_device(v4l2_dev->dev);
96 v4l2_dev->dev = NULL; 98 v4l2_dev->dev = NULL;
97} 99}
98EXPORT_SYMBOL_GPL(v4l2_device_disconnect); 100EXPORT_SYMBOL_GPL(v4l2_device_disconnect);
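The v4l2-device pairing above pins the parent struct device for the whole lifetime of the v4l2_device: take a reference when the pointer is stored, drop it when the pointer is cleared. The general rule in a hedged kernel-style sketch (struct and function names are hypothetical; get_device()/put_device() are the real primitives):

#include <linux/device.h>

struct example_core {
	struct device *dev;
};

static void example_core_attach(struct example_core *core, struct device *dev)
{
	get_device(dev);	/* pin the parent while we hold a pointer   */
	core->dev = dev;
}

static void example_core_detach(struct example_core *core)
{
	put_device(core->dev);	/* every get_device() needs a put_device() */
	core->dev = NULL;
}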
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index 85d3048c1d67..bb7f17f2a33c 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -1332,6 +1332,8 @@ static __devinit bool viacam_serial_is_enabled(void)
1332 struct pci_bus *pbus = pci_find_bus(0, 0); 1332 struct pci_bus *pbus = pci_find_bus(0, 0);
1333 u8 cbyte; 1333 u8 cbyte;
1334 1334
1335 if (!pbus)
1336 return false;
1335 pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN, 1337 pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN,
1336 VIACAM_SERIAL_CREG, &cbyte); 1338 VIACAM_SERIAL_CREG, &cbyte);
1337 if ((cbyte & VIACAM_SERIAL_BIT) == 0) 1339 if ((cbyte & VIACAM_SERIAL_BIT) == 0)
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 21131c7b0f1e..563654c9b19e 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
273 ct->regs.ack = JZ_REG_ADC_STATUS; 273 ct->regs.ack = JZ_REG_ADC_STATUS;
274 ct->chip.irq_mask = irq_gc_mask_set_bit; 274 ct->chip.irq_mask = irq_gc_mask_set_bit;
275 ct->chip.irq_unmask = irq_gc_mask_clr_bit; 275 ct->chip.irq_unmask = irq_gc_mask_clr_bit;
276 ct->chip.irq_ack = irq_gc_ack; 276 ct->chip.irq_ack = irq_gc_ack_set_bit;
277 277
278 irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); 278 irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
279 279
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 5d1fca0277ef..f83103b8970d 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -135,10 +135,13 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
135 max8997->dev = &i2c->dev; 135 max8997->dev = &i2c->dev;
136 max8997->i2c = i2c; 136 max8997->i2c = i2c;
137 max8997->type = id->driver_data; 137 max8997->type = id->driver_data;
138 max8997->irq = i2c->irq;
138 139
139 if (!pdata) 140 if (!pdata)
140 goto err; 141 goto err;
141 142
143 max8997->irq_base = pdata->irq_base;
144 max8997->ono = pdata->ono;
142 max8997->wakeup = pdata->wakeup; 145 max8997->wakeup = pdata->wakeup;
143 146
144 mutex_init(&max8997->iolock); 147 mutex_init(&max8997->iolock);
@@ -152,6 +155,8 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
152 155
153 pm_runtime_set_active(max8997->dev); 156 pm_runtime_set_active(max8997->dev);
154 157
158 max8997_irq_init(max8997);
159
155 mfd_add_devices(max8997->dev, -1, max8997_devs, 160 mfd_add_devices(max8997->dev, -1, max8997_devs,
156 ARRAY_SIZE(max8997_devs), 161 ARRAY_SIZE(max8997_devs),
157 NULL, 0); 162 NULL, 0);
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 9cee8e7f0bcb..af5d9d061371 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -44,7 +44,7 @@
44 44
45#include <asm/mach/irq.h> 45#include <asm/mach/irq.h>
46 46
47#include <mach/gpio.h> 47#include <asm/gpio.h>
48#include <plat/menelaus.h> 48#include <plat/menelaus.h>
49 49
50#define DRIVER_NAME "menelaus" 50#define DRIVER_NAME "menelaus"
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 29601e7d606d..86e14583a082 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -17,6 +17,7 @@
17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/module.h>
20#include <linux/types.h> 21#include <linux/types.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22#include <linux/delay.h> 23#include <linux/delay.h>
@@ -676,7 +677,6 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
676 | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF 677 | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
677 | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); 678 | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
678 679
679 reg |= (1 << (i + 1));
680 } else 680 } else
681 continue; 681 continue;
682 682
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
index 2bfad5c86cc7..a56be931551c 100644
--- a/drivers/mfd/tps65910-irq.c
+++ b/drivers/mfd/tps65910-irq.c
@@ -178,8 +178,10 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
178 switch (tps65910_chip_id(tps65910)) { 178 switch (tps65910_chip_id(tps65910)) {
179 case TPS65910: 179 case TPS65910:
180 tps65910->irq_num = TPS65910_NUM_IRQ; 180 tps65910->irq_num = TPS65910_NUM_IRQ;
181 break;
181 case TPS65911: 182 case TPS65911:
182 tps65910->irq_num = TPS65911_NUM_IRQ; 183 tps65910->irq_num = TPS65911_NUM_IRQ;
184 break;
183 } 185 }
184 186
185 /* Register with genirq */ 187 /* Register with genirq */
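The tps65910 fix restores the break statements, without which the TPS65910 case fell through and silently picked up the TPS65911 interrupt count. The pitfall in miniature:

#include <stdio.h>

enum chip { CHIP_A, CHIP_B };

static int irq_count(enum chip id)
{
	int n = 0;

	switch (id) {
	case CHIP_A:
		n = 10;
		break;	/* without this, CHIP_A falls through ...  */
	case CHIP_B:
		n = 14;	/* ... and silently ends up with B's count */
		break;
	}
	return n;
}

int main(void)
{
	printf("%d\n", irq_count(CHIP_A));	/* 10 only with the break */
	return 0;
}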
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index b5d598c3aa71..7cbf2aa9e64f 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
510 u8 ch_msb, ch_lsb; 510 u8 ch_msb, ch_lsb;
511 int ret; 511 int ret;
512 512
513 if (!req) 513 if (!req || !twl4030_madc)
514 return -EINVAL; 514 return -EINVAL;
515
515 mutex_lock(&twl4030_madc->lock); 516 mutex_lock(&twl4030_madc->lock);
516 if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) { 517 if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
517 ret = -EINVAL; 518 ret = -EINVAL;
@@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
706 if (!madc) 707 if (!madc)
707 return -ENOMEM; 708 return -ENOMEM;
708 709
710 madc->dev = &pdev->dev;
711
709 /* 712 /*
710 * Phoenix provides 2 interrupt lines. The first one is connected to 713 * Phoenix provides 2 interrupt lines. The first one is connected to
711 * the OMAP. The other one can be connected to the other processor such 714 * the OMAP. The other one can be connected to the other processor such
diff --git a/drivers/mfd/wm8350-gpio.c b/drivers/mfd/wm8350-gpio.c
index ebf99bef392f..d584f6b4d6e2 100644
--- a/drivers/mfd/wm8350-gpio.c
+++ b/drivers/mfd/wm8350-gpio.c
@@ -37,7 +37,7 @@ static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir)
37 return ret; 37 return ret;
38} 38}
39 39
40static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) 40static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
41{ 41{
42 if (db == WM8350_GPIO_DEBOUNCE_ON) 42 if (db == WM8350_GPIO_DEBOUNCE_ON)
43 return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE, 43 return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE,
@@ -210,7 +210,7 @@ int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func,
210 goto err; 210 goto err;
211 if (gpio_set_polarity(wm8350, gpio, pol)) 211 if (gpio_set_polarity(wm8350, gpio, pol))
212 goto err; 212 goto err;
213 if (gpio_set_debounce(wm8350, gpio, debounce)) 213 if (wm8350_gpio_set_debounce(wm8350, gpio, debounce))
214 goto err; 214 goto err;
215 if (gpio_set_dir(wm8350, gpio, dir)) 215 if (gpio_set_dir(wm8350, gpio, dir))
216 goto err; 216 goto err;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0a4d86c6c4a4..2d6423c2d193 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -146,6 +146,7 @@ config PHANTOM
146 146
147config INTEL_MID_PTI 147config INTEL_MID_PTI
148 tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard" 148 tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
149 depends on PCI
149 default n 150 default n
150 help 151 help
151 The PTI (Parallel Trace Interface) driver directs 152 The PTI (Parallel Trace Interface) driver directs
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index 54e3d05b63cc..35903154ca2e 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -164,5 +164,5 @@ subsys_initcall(ab8500_pwm_init);
164module_exit(ab8500_pwm_exit); 164module_exit(ab8500_pwm_exit);
165MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>"); 165MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>");
166MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver"); 166MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver");
167MODULE_ALIAS("AB8500 PWM driver"); 167MODULE_ALIAS("platform:ab8500-pwm");
168MODULE_LICENSE("GPL v2"); 168MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index efec4139c3f6..68cd05b6d829 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg);
33static int __devinit cb710_pci_configure(struct pci_dev *pdev) 33static int __devinit cb710_pci_configure(struct pci_dev *pdev)
34{ 34{
35 unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); 35 unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
36 struct pci_dev *pdev0 = pci_get_slot(pdev->bus, devfn); 36 struct pci_dev *pdev0;
37 u32 val; 37 u32 val;
38 38
39 cb710_pci_update_config_reg(pdev, 0x48, 39 cb710_pci_update_config_reg(pdev, 0x48,
@@ -43,6 +43,7 @@ static int __devinit cb710_pci_configure(struct pci_dev *pdev)
43 if (val & 0x80000000) 43 if (val & 0x80000000)
44 return 0; 44 return 0;
45 45
46 pdev0 = pci_get_slot(pdev->bus, devfn);
46 if (!pdev0) 47 if (!pdev0)
47 return -ENODEV; 48 return -ENODEV;
48 49
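pci_get_slot() hands back its struct pci_dev with the reference count raised, so calling it before the early "already configured" return leaked a reference on that path; the fix takes the reference only once it is actually needed. The shape of the rule, as a hedged sketch (pci_get_slot()/pci_dev_put() are the real pair, the rest is illustrative):

#include <linux/pci.h>

static int example_configure(struct pci_dev *pdev, unsigned int devfn, u32 val)
{
	struct pci_dev *pdev0;

	if (val & 0x80000000)
		return 0;	/* early exit: no reference was taken */

	pdev0 = pci_get_slot(pdev->bus, devfn);	/* takes a reference */
	if (!pdev0)
		return -ENODEV;

	/* ... update pdev0's config space ... */

	pci_dev_put(pdev0);	/* balance the reference on every path */
	return 0;
}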
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c
index 5325a7e70dcf..27dc0d21aafa 100644
--- a/drivers/misc/fsa9480.c
+++ b/drivers/misc/fsa9480.c
@@ -455,7 +455,7 @@ static int __devinit fsa9480_probe(struct i2c_client *client,
455 455
456fail2: 456fail2:
457 if (client->irq) 457 if (client->irq)
458 free_irq(client->irq, NULL); 458 free_irq(client->irq, usbsw);
459fail1: 459fail1:
460 i2c_set_clientdata(client, NULL); 460 i2c_set_clientdata(client, NULL);
461 kfree(usbsw); 461 kfree(usbsw);
@@ -466,7 +466,7 @@ static int __devexit fsa9480_remove(struct i2c_client *client)
466{ 466{
467 struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); 467 struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
468 if (client->irq) 468 if (client->irq)
469 free_irq(client->irq, NULL); 469 free_irq(client->irq, usbsw);
470 i2c_set_clientdata(client, NULL); 470 i2c_set_clientdata(client, NULL);
471 471
472 sysfs_remove_group(&client->dev.kobj, &fsa9480_group); 472 sysfs_remove_group(&client->dev.kobj, &fsa9480_group);
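free_irq() looks the handler up by the (irq, dev_id) pair, so it must be given the same cookie that request_irq() registered; the fsa9480 fix replaces the NULL with the driver's own usbsw pointer. A hedged sketch of the pairing (names are placeholders):

#include <linux/interrupt.h>

static irqreturn_t example_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_attach(unsigned int irq, void *priv)
{
	/* 'priv' is the cookie passed back to the handler ...        */
	return request_irq(irq, example_irq, 0, "example", priv);
}

static void example_detach(unsigned int irq, void *priv)
{
	/* ... and the same cookie identifies the handler on teardown */
	free_irq(irq, priv);
}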
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index b928bc14e97b..8b51cd62d067 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
375 * both have been read. So the value read will always be correct. 375 * both have been read. So the value read will always be correct.
376 * Set BOOT bit to refresh factory tuning values. 376 * Set BOOT bit to refresh factory tuning values.
377 */ 377 */
378 lis3->read(lis3, CTRL_REG2, &reg); 378 if (lis3->pdata) {
379 if (lis3->whoami == WAI_12B) 379 lis3->read(lis3, CTRL_REG2, &reg);
380 reg |= CTRL2_BDU | CTRL2_BOOT; 380 if (lis3->whoami == WAI_12B)
381 else 381 reg |= CTRL2_BDU | CTRL2_BOOT;
382 reg |= CTRL2_BOOT_8B; 382 else
383 lis3->write(lis3, CTRL_REG2, reg); 383 reg |= CTRL2_BOOT_8B;
384 lis3->write(lis3, CTRL_REG2, reg);
385 }
384 386
385 /* LIS3 power on delay is quite long */ 387 /* LIS3 power on delay is quite long */
386 msleep(lis3->pwron_delay / lis3lv02d_get_odr()); 388 msleep(lis3->pwron_delay / lis3lv02d_get_odr());
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index 8653bd0b1a33..0b56e3f43573 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -33,6 +33,8 @@
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/miscdevice.h> 34#include <linux/miscdevice.h>
35#include <linux/pti.h> 35#include <linux/pti.h>
36#include <linux/slab.h>
37#include <linux/uaccess.h>
36 38
37#define DRIVERNAME "pti" 39#define DRIVERNAME "pti"
38#define PCINAME "pciPTI" 40#define PCINAME "pciPTI"
@@ -163,6 +165,11 @@ static void pti_write_to_aperture(struct pti_masterchannel *mc,
163static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, 165static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
164 const char *thread_name) 166 const char *thread_name)
165{ 167{
168 /*
169 * Since we access the comm member in current's task_struct, we only
170 * need to be as large as what 'comm' in that structure is.
171 */
172 char comm[TASK_COMM_LEN];
166 struct pti_masterchannel mccontrol = {.master = CONTROL_ID, 173 struct pti_masterchannel mccontrol = {.master = CONTROL_ID,
167 .channel = 0}; 174 .channel = 0};
168 const char *thread_name_p; 175 const char *thread_name_p;
@@ -170,13 +177,6 @@ static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
170 u8 control_frame[CONTROL_FRAME_LEN]; 177 u8 control_frame[CONTROL_FRAME_LEN];
171 178
172 if (!thread_name) { 179 if (!thread_name) {
173 /*
174 * Since we access the comm member in current's task_struct,
175 * we only need to be as large as what 'comm' in that
176 * structure is.
177 */
178 char comm[TASK_COMM_LEN];
179
180 if (!in_interrupt()) 180 if (!in_interrupt())
181 get_task_comm(comm, current); 181 get_task_comm(comm, current);
182 else 182 else
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 54c91ffe4a91..ba168a7d54d4 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -338,6 +338,12 @@ void st_int_recv(void *disc_data,
338 /* Unknown packet? */ 338 /* Unknown packet? */
339 default: 339 default:
340 type = *ptr; 340 type = *ptr;
341 if (st_gdata->list[type] == NULL) {
342 pr_err("chip/interface misbehavior dropping"
343 " frame starting with 0x%02x", type);
344 goto done;
345
346 }
341 st_gdata->rx_skb = alloc_skb( 347 st_gdata->rx_skb = alloc_skb(
342 st_gdata->list[type]->max_frame_size, 348 st_gdata->list[type]->max_frame_size,
343 GFP_ATOMIC); 349 GFP_ATOMIC);
@@ -354,6 +360,7 @@ void st_int_recv(void *disc_data,
354 ptr++; 360 ptr++;
355 count--; 361 count--;
356 } 362 }
363done:
357 spin_unlock_irqrestore(&st_gdata->lock, flags); 364 spin_unlock_irqrestore(&st_gdata->lock, flags);
358 pr_debug("done %s", __func__); 365 pr_debug("done %s", __func__);
359 return; 366 return;
@@ -717,9 +724,10 @@ static void st_tty_close(struct tty_struct *tty)
717 */ 724 */
718 spin_lock_irqsave(&st_gdata->lock, flags); 725 spin_lock_irqsave(&st_gdata->lock, flags);
719 for (i = ST_BT; i < ST_MAX_CHANNELS; i++) { 726 for (i = ST_BT; i < ST_MAX_CHANNELS; i++) {
720 if (st_gdata->list[i] != NULL) 727 if (st_gdata->is_registered[i] == true)
721 pr_err("%d not un-registered", i); 728 pr_err("%d not un-registered", i);
722 st_gdata->list[i] = NULL; 729 st_gdata->list[i] = NULL;
730 st_gdata->is_registered[i] = false;
723 } 731 }
724 st_gdata->protos_registered = 0; 732 st_gdata->protos_registered = 0;
725 spin_unlock_irqrestore(&st_gdata->lock, flags); 733 spin_unlock_irqrestore(&st_gdata->lock, flags);
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 38fd2f04c07e..3a3580566dfc 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -68,6 +68,7 @@ void validate_firmware_response(struct kim_data_s *kim_gdata)
68 if (unlikely(skb->data[5] != 0)) { 68 if (unlikely(skb->data[5] != 0)) {
69 pr_err("no proper response during fw download"); 69 pr_err("no proper response during fw download");
70 pr_err("data6 %x", skb->data[5]); 70 pr_err("data6 %x", skb->data[5]);
71 kfree_skb(skb);
71 return; /* keep waiting for the proper response */ 72 return; /* keep waiting for the proper response */
72 } 73 }
73 /* because of all the script being downloaded */ 74
@@ -210,6 +211,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
210 pr_err(" waiting for ver info- timed out "); 211 pr_err(" waiting for ver info- timed out ");
211 return -ETIMEDOUT; 212 return -ETIMEDOUT;
212 } 213 }
214 INIT_COMPLETION(kim_gdata->kim_rcvd);
213 215
214 version = 216 version =
215 MAKEWORD(kim_gdata->resp_buffer[13], 217 MAKEWORD(kim_gdata->resp_buffer[13],
@@ -298,6 +300,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
298 300
299 switch (((struct bts_action *)ptr)->type) { 301 switch (((struct bts_action *)ptr)->type) {
300 case ACTION_SEND_COMMAND: /* action send */ 302 case ACTION_SEND_COMMAND: /* action send */
303 pr_debug("S");
301 action_ptr = &(((struct bts_action *)ptr)->data[0]); 304 action_ptr = &(((struct bts_action *)ptr)->data[0]);
302 if (unlikely 305 if (unlikely
303 (((struct hci_command *)action_ptr)->opcode == 306 (((struct hci_command *)action_ptr)->opcode ==
@@ -335,6 +338,10 @@ static long download_firmware(struct kim_data_s *kim_gdata)
335 release_firmware(kim_gdata->fw_entry); 338 release_firmware(kim_gdata->fw_entry);
336 return -ETIMEDOUT; 339 return -ETIMEDOUT;
337 } 340 }
341 /* reinit completion before sending for the
342 * relevant wait
343 */
344 INIT_COMPLETION(kim_gdata->kim_rcvd);
338 345
339 /* 346 /*
340 * Free space found in uart buffer, call st_int_write 347 * Free space found in uart buffer, call st_int_write
@@ -361,6 +368,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
361 } 368 }
362 break; 369 break;
363 case ACTION_WAIT_EVENT: /* wait */ 370 case ACTION_WAIT_EVENT: /* wait */
371 pr_debug("W");
364 if (!wait_for_completion_timeout 372 if (!wait_for_completion_timeout
365 (&kim_gdata->kim_rcvd, 373 (&kim_gdata->kim_rcvd,
366 msecs_to_jiffies(CMD_RESP_TIME))) { 374 msecs_to_jiffies(CMD_RESP_TIME))) {
@@ -434,11 +442,17 @@ long st_kim_start(void *kim_data)
434{ 442{
435 long err = 0; 443 long err = 0;
436 long retry = POR_RETRY_COUNT; 444 long retry = POR_RETRY_COUNT;
445 struct ti_st_plat_data *pdata;
437 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; 446 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
438 447
439 pr_info(" %s", __func__); 448 pr_info(" %s", __func__);
449 pdata = kim_gdata->kim_pdev->dev.platform_data;
440 450
441 do { 451 do {
452 /* platform specific enabling code here */
453 if (pdata->chip_enable)
454 pdata->chip_enable(kim_gdata);
455
442 /* Configure BT nShutdown to HIGH state */ 456 /* Configure BT nShutdown to HIGH state */
443 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); 457 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
444 mdelay(5); /* FIXME: a proper toggle */ 458 mdelay(5); /* FIXME: a proper toggle */
@@ -460,6 +474,12 @@ long st_kim_start(void *kim_data)
460 pr_info("ldisc_install = 0"); 474 pr_info("ldisc_install = 0");
461 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, 475 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
462 NULL, "install"); 476 NULL, "install");
477 /* the following wait is never going to be completed,
478 * since the ldisc was never installed, hence serving
479 * as a mdelay of LDISC_TIME msecs */
480 err = wait_for_completion_timeout
481 (&kim_gdata->ldisc_installed,
482 msecs_to_jiffies(LDISC_TIME));
463 err = -ETIMEDOUT; 483 err = -ETIMEDOUT;
464 continue; 484 continue;
465 } else { 485 } else {
@@ -472,6 +492,13 @@ long st_kim_start(void *kim_data)
472 pr_info("ldisc_install = 0"); 492 pr_info("ldisc_install = 0");
473 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, 493 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
474 NULL, "install"); 494 NULL, "install");
495 /* this wait might be completed, though in the
496 * tty_close() since the ldisc is already
497 * installed */
498 err = wait_for_completion_timeout
499 (&kim_gdata->ldisc_installed,
500 msecs_to_jiffies(LDISC_TIME));
501 err = -EINVAL;
475 continue; 502 continue;
476 } else { /* on success don't retry */ 503 } else { /* on success don't retry */
477 break; 504 break;
@@ -489,6 +516,8 @@ long st_kim_stop(void *kim_data)
489{ 516{
490 long err = 0; 517 long err = 0;
491 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; 518 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
519 struct ti_st_plat_data *pdata =
520 kim_gdata->kim_pdev->dev.platform_data;
492 521
493 INIT_COMPLETION(kim_gdata->ldisc_installed); 522 INIT_COMPLETION(kim_gdata->ldisc_installed);
494 523
@@ -515,6 +544,10 @@ long st_kim_stop(void *kim_data)
515 gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); 544 gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
516 mdelay(1); 545 mdelay(1);
517 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); 546 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
547
548 /* platform specific disable */
549 if (pdata->chip_disable)
550 pdata->chip_disable(kim_gdata);
518 return err; 551 return err;
519} 552}
520 553
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
index 3f2495138855..1ff460a8e9c7 100644
--- a/drivers/misc/ti-st/st_ll.c
+++ b/drivers/misc/ti-st/st_ll.c
@@ -22,6 +22,7 @@
22#define pr_fmt(fmt) "(stll) :" fmt 22#define pr_fmt(fmt) "(stll) :" fmt
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h>
25#include <linux/ti_wilink_st.h> 26#include <linux/ti_wilink_st.h>
26 27
27/**********************************************************************/ 28/**********************************************************************/
@@ -37,6 +38,9 @@ static void send_ll_cmd(struct st_data_s *st_data,
37 38
38static void ll_device_want_to_sleep(struct st_data_s *st_data) 39static void ll_device_want_to_sleep(struct st_data_s *st_data)
39{ 40{
41 struct kim_data_s *kim_data;
42 struct ti_st_plat_data *pdata;
43
40 pr_debug("%s", __func__); 44 pr_debug("%s", __func__);
41 /* sanity check */ 45 /* sanity check */
42 if (st_data->ll_state != ST_LL_AWAKE) 46 if (st_data->ll_state != ST_LL_AWAKE)
@@ -46,10 +50,19 @@ static void ll_device_want_to_sleep(struct st_data_s *st_data)
46 send_ll_cmd(st_data, LL_SLEEP_ACK); 50 send_ll_cmd(st_data, LL_SLEEP_ACK);
47 /* update state */ 51 /* update state */
48 st_data->ll_state = ST_LL_ASLEEP; 52 st_data->ll_state = ST_LL_ASLEEP;
53
54 /* communicate to platform about chip asleep */
55 kim_data = st_data->kim_data;
56 pdata = kim_data->kim_pdev->dev.platform_data;
57 if (pdata->chip_asleep)
58 pdata->chip_asleep(NULL);
49} 59}
50 60
51static void ll_device_want_to_wakeup(struct st_data_s *st_data) 61static void ll_device_want_to_wakeup(struct st_data_s *st_data)
52{ 62{
63 struct kim_data_s *kim_data;
64 struct ti_st_plat_data *pdata;
65
53 /* diff actions in diff states */ 66 /* diff actions in diff states */
54 switch (st_data->ll_state) { 67 switch (st_data->ll_state) {
55 case ST_LL_ASLEEP: 68 case ST_LL_ASLEEP:
@@ -70,6 +83,12 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data)
70 } 83 }
71 /* update state */ 84 /* update state */
72 st_data->ll_state = ST_LL_AWAKE; 85 st_data->ll_state = ST_LL_AWAKE;
86
87 /* communicate to platform about chip wakeup */
88 kim_data = st_data->kim_data;
89 pdata = kim_data->kim_pdev->dev.platform_data;
 90 if (pdata->chip_awake)
91 pdata->chip_awake(NULL);
73} 92}
74 93
75/**********************************************************************/ 94/**********************************************************************/
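The st_kim.c and st_ll.c hunks above route four new optional hooks out of the platform data (chip_enable, chip_disable, chip_asleep, chip_awake). A minimal board-file sketch of how they might be wired up follows; the my_board_* helpers and the exact callback prototypes are assumptions inferred from the call sites in the diff, not taken from a real header or board file.

#include <linux/ti_wilink_st.h>

/* Hypothetical board-side helpers; prototypes inferred from the call
 * sites above (chip_enable(kim_gdata), chip_asleep(NULL)). */
static int my_board_chip_enable(struct kim_data_s *kim_data)
{
        /* platform-specific power/clock enable would go here */
        return 0;
}

static int my_board_chip_disable(struct kim_data_s *kim_data)
{
        /* undo whatever chip_enable did */
        return 0;
}

static struct ti_st_plat_data wilink_pdata = {
        .chip_enable  = my_board_chip_enable,
        .chip_disable = my_board_chip_disable,
        /* .chip_asleep and .chip_awake are invoked from the st_ll.c
         * sleep/wake handlers above, currently with a NULL argument */
};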
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1ff5486213fb..4c1a648d00fc 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -926,6 +926,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
926 /* 926 /*
927 * Reliable writes are used to implement Forced Unit Access and 927 * Reliable writes are used to implement Forced Unit Access and
928 * REQ_META accesses, and are supported only on MMCs. 928 * REQ_META accesses, and are supported only on MMCs.
929 *
930 * XXX: this really needs a good explanation of why REQ_META
 931 * is treated specially.
929 */ 932 */
930 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || 933 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
931 (req->cmd_flags & REQ_META)) && 934 (req->cmd_flags & REQ_META)) &&
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 006a5e9f8ab8..2bf229acd3b8 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -224,7 +224,7 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
224static int mmc_test_busy(struct mmc_command *cmd) 224static int mmc_test_busy(struct mmc_command *cmd)
225{ 225{
226 return !(cmd->resp[0] & R1_READY_FOR_DATA) || 226 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
227 (R1_CURRENT_STATE(cmd->resp[0]) == 7); 227 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
228} 228}
229 229
230/* 230/*
@@ -2900,7 +2900,7 @@ static const struct file_operations mmc_test_fops_testlist = {
2900 .release = single_release, 2900 .release = single_release,
2901}; 2901};
2902 2902
2903static void mmc_test_free_file_test(struct mmc_card *card) 2903static void mmc_test_free_dbgfs_file(struct mmc_card *card)
2904{ 2904{
2905 struct mmc_test_dbgfs_file *df, *dfs; 2905 struct mmc_test_dbgfs_file *df, *dfs;
2906 2906
@@ -2917,34 +2917,21 @@ static void mmc_test_free_file_test(struct mmc_card *card)
2917 mutex_unlock(&mmc_test_lock); 2917 mutex_unlock(&mmc_test_lock);
2918} 2918}
2919 2919
2920static int mmc_test_register_file_test(struct mmc_card *card) 2920static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
2921 const char *name, mode_t mode, const struct file_operations *fops)
2921{ 2922{
2922 struct dentry *file = NULL; 2923 struct dentry *file = NULL;
2923 struct mmc_test_dbgfs_file *df; 2924 struct mmc_test_dbgfs_file *df;
2924 int ret = 0;
2925
2926 mutex_lock(&mmc_test_lock);
2927
2928 if (card->debugfs_root)
2929 file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
2930 card->debugfs_root, card, &mmc_test_fops_test);
2931
2932 if (IS_ERR_OR_NULL(file)) {
2933 dev_err(&card->dev,
2934 "Can't create test. Perhaps debugfs is disabled.\n");
2935 ret = -ENODEV;
2936 goto err;
2937 }
2938 2925
2939 if (card->debugfs_root) 2926 if (card->debugfs_root)
2940 file = debugfs_create_file("testlist", S_IRUGO, 2927 file = debugfs_create_file(name, mode, card->debugfs_root,
2941 card->debugfs_root, card, &mmc_test_fops_testlist); 2928 card, fops);
2942 2929
2943 if (IS_ERR_OR_NULL(file)) { 2930 if (IS_ERR_OR_NULL(file)) {
2944 dev_err(&card->dev, 2931 dev_err(&card->dev,
2945 "Can't create testlist. Perhaps debugfs is disabled.\n"); 2932 "Can't create %s. Perhaps debugfs is disabled.\n",
2946 ret = -ENODEV; 2933 name);
2947 goto err; 2934 return -ENODEV;
2948 } 2935 }
2949 2936
2950 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL); 2937 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
@@ -2952,14 +2939,31 @@ static int mmc_test_register_file_test(struct mmc_card *card)
2952 debugfs_remove(file); 2939 debugfs_remove(file);
2953 dev_err(&card->dev, 2940 dev_err(&card->dev,
2954 "Can't allocate memory for internal usage.\n"); 2941 "Can't allocate memory for internal usage.\n");
2955 ret = -ENOMEM; 2942 return -ENOMEM;
2956 goto err;
2957 } 2943 }
2958 2944
2959 df->card = card; 2945 df->card = card;
2960 df->file = file; 2946 df->file = file;
2961 2947
2962 list_add(&df->link, &mmc_test_file_test); 2948 list_add(&df->link, &mmc_test_file_test);
2949 return 0;
2950}
2951
2952static int mmc_test_register_dbgfs_file(struct mmc_card *card)
2953{
2954 int ret;
2955
2956 mutex_lock(&mmc_test_lock);
2957
2958 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
2959 &mmc_test_fops_test);
2960 if (ret)
2961 goto err;
2962
2963 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
2964 &mmc_test_fops_testlist);
2965 if (ret)
2966 goto err;
2963 2967
2964err: 2968err:
2965 mutex_unlock(&mmc_test_lock); 2969 mutex_unlock(&mmc_test_lock);
@@ -2974,7 +2978,7 @@ static int mmc_test_probe(struct mmc_card *card)
2974 if (!mmc_card_mmc(card) && !mmc_card_sd(card)) 2978 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
2975 return -ENODEV; 2979 return -ENODEV;
2976 2980
2977 ret = mmc_test_register_file_test(card); 2981 ret = mmc_test_register_dbgfs_file(card);
2978 if (ret) 2982 if (ret)
2979 return ret; 2983 return ret;
2980 2984
@@ -2986,7 +2990,7 @@ static int mmc_test_probe(struct mmc_card *card)
2986static void mmc_test_remove(struct mmc_card *card) 2990static void mmc_test_remove(struct mmc_card *card)
2987{ 2991{
2988 mmc_test_free_result(card); 2992 mmc_test_free_result(card);
2989 mmc_test_free_file_test(card); 2993 mmc_test_free_dbgfs_file(card);
2990} 2994}
2991 2995
2992static struct mmc_driver mmc_driver = { 2996static struct mmc_driver mmc_driver = {
@@ -3006,7 +3010,7 @@ static void __exit mmc_test_exit(void)
3006{ 3010{
3007 /* Clear stalled data if card is still plugged */ 3011 /* Clear stalled data if card is still plugged */
3008 mmc_test_free_result(NULL); 3012 mmc_test_free_result(NULL);
3009 mmc_test_free_file_test(NULL); 3013 mmc_test_free_dbgfs_file(NULL);
3010 3014
3011 mmc_unregister_driver(&mmc_driver); 3015 mmc_unregister_driver(&mmc_driver);
3012} 3016}
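The mmc_test refactor above collapses the two debugfs files into one parameterised helper. As a usage illustration only, a hypothetical third entry could be registered the same way; the file name "status" and mmc_test_fops_status are invented, while the helper name, its argument order, and the locking rule (caller holds mmc_test_lock) come from the diff.

static int mmc_test_register_status_file(struct mmc_card *card)
{
        int ret;

        mutex_lock(&mmc_test_lock);
        /* same helper the diff uses for "test" and "testlist" */
        ret = __mmc_test_register_dbgfs_file(card, "status", S_IRUGO,
                &mmc_test_fops_status);
        mutex_unlock(&mmc_test_lock);

        return ret;
}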
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 89bdeaec7182..b27b94078c21 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
133 if (mrq->done) 133 if (mrq->done)
134 mrq->done(mrq); 134 mrq->done(mrq);
135 135
136 mmc_host_clk_gate(host); 136 mmc_host_clk_release(host);
137 } 137 }
138} 138}
139 139
@@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
192 mrq->stop->mrq = mrq; 192 mrq->stop->mrq = mrq;
193 } 193 }
194 } 194 }
195 mmc_host_clk_ungate(host); 195 mmc_host_clk_hold(host);
196 led_trigger_event(host->led, LED_FULL); 196 led_trigger_event(host->led, LED_FULL);
197 host->ops->request(host, mrq); 197 host->ops->request(host, mrq);
198} 198}
@@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
728 */ 728 */
729void mmc_set_chip_select(struct mmc_host *host, int mode) 729void mmc_set_chip_select(struct mmc_host *host, int mode)
730{ 730{
731 mmc_host_clk_hold(host);
731 host->ios.chip_select = mode; 732 host->ios.chip_select = mode;
732 mmc_set_ios(host); 733 mmc_set_ios(host);
734 mmc_host_clk_release(host);
733} 735}
734 736
735/* 737/*
736 * Sets the host clock to the highest possible frequency that 738 * Sets the host clock to the highest possible frequency that
737 * is below "hz". 739 * is below "hz".
738 */ 740 */
739void mmc_set_clock(struct mmc_host *host, unsigned int hz) 741static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
740{ 742{
741 WARN_ON(hz < host->f_min); 743 WARN_ON(hz < host->f_min);
742 744
@@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
747 mmc_set_ios(host); 749 mmc_set_ios(host);
748} 750}
749 751
752void mmc_set_clock(struct mmc_host *host, unsigned int hz)
753{
754 mmc_host_clk_hold(host);
755 __mmc_set_clock(host, hz);
756 mmc_host_clk_release(host);
757}
758
750#ifdef CONFIG_MMC_CLKGATE 759#ifdef CONFIG_MMC_CLKGATE
751/* 760/*
752 * This gates the clock by setting it to 0 Hz. 761 * This gates the clock by setting it to 0 Hz.
@@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host)
779 if (host->clk_old) { 788 if (host->clk_old) {
780 BUG_ON(host->ios.clock); 789 BUG_ON(host->ios.clock);
781 /* This call will also set host->clk_gated to false */ 790 /* This call will also set host->clk_gated to false */
782 mmc_set_clock(host, host->clk_old); 791 __mmc_set_clock(host, host->clk_old);
783 } 792 }
784} 793}
785 794
@@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host)
807 */ 816 */
808void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) 817void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
809{ 818{
819 mmc_host_clk_hold(host);
810 host->ios.bus_mode = mode; 820 host->ios.bus_mode = mode;
811 mmc_set_ios(host); 821 mmc_set_ios(host);
822 mmc_host_clk_release(host);
812} 823}
813 824
814/* 825/*
@@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
816 */ 827 */
817void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 828void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
818{ 829{
830 mmc_host_clk_hold(host);
819 host->ios.bus_width = width; 831 host->ios.bus_width = width;
820 mmc_set_ios(host); 832 mmc_set_ios(host);
833 mmc_host_clk_release(host);
821} 834}
822 835
823/** 836/**
@@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1015 1028
1016 ocr &= 3 << bit; 1029 ocr &= 3 << bit;
1017 1030
1031 mmc_host_clk_hold(host);
1018 host->ios.vdd = bit; 1032 host->ios.vdd = bit;
1019 mmc_set_ios(host); 1033 mmc_set_ios(host);
1034 mmc_host_clk_release(host);
1020 } else { 1035 } else {
1021 pr_warning("%s: host doesn't support card's voltages\n", 1036 pr_warning("%s: host doesn't support card's voltages\n",
1022 mmc_hostname(host)); 1037 mmc_hostname(host));
@@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
1063 */ 1078 */
1064void mmc_set_timing(struct mmc_host *host, unsigned int timing) 1079void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1065{ 1080{
1081 mmc_host_clk_hold(host);
1066 host->ios.timing = timing; 1082 host->ios.timing = timing;
1067 mmc_set_ios(host); 1083 mmc_set_ios(host);
1084 mmc_host_clk_release(host);
1068} 1085}
1069 1086
1070/* 1087/*
@@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1072 */ 1089 */
1073void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) 1090void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1074{ 1091{
1092 mmc_host_clk_hold(host);
1075 host->ios.drv_type = drv_type; 1093 host->ios.drv_type = drv_type;
1076 mmc_set_ios(host); 1094 mmc_set_ios(host);
1095 mmc_host_clk_release(host);
1077} 1096}
1078 1097
1079/* 1098/*
@@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host)
1091{ 1110{
1092 int bit; 1111 int bit;
1093 1112
1113 mmc_host_clk_hold(host);
1114
1094 /* If ocr is set, we use it */ 1115 /* If ocr is set, we use it */
1095 if (host->ocr) 1116 if (host->ocr)
1096 bit = ffs(host->ocr) - 1; 1117 bit = ffs(host->ocr) - 1;
@@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host)
1126 * time required to reach a stable voltage. 1147 * time required to reach a stable voltage.
1127 */ 1148 */
1128 mmc_delay(10); 1149 mmc_delay(10);
1150
1151 mmc_host_clk_release(host);
1129} 1152}
1130 1153
1131static void mmc_power_off(struct mmc_host *host) 1154static void mmc_power_off(struct mmc_host *host)
1132{ 1155{
1156 mmc_host_clk_hold(host);
1157
1133 host->ios.clock = 0; 1158 host->ios.clock = 0;
1134 host->ios.vdd = 0; 1159 host->ios.vdd = 0;
1135 1160
@@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host)
1147 host->ios.bus_width = MMC_BUS_WIDTH_1; 1172 host->ios.bus_width = MMC_BUS_WIDTH_1;
1148 host->ios.timing = MMC_TIMING_LEGACY; 1173 host->ios.timing = MMC_TIMING_LEGACY;
1149 mmc_set_ios(host); 1174 mmc_set_ios(host);
1175
1176 mmc_host_clk_release(host);
1150} 1177}
1151 1178
1152/* 1179/*
@@ -1502,7 +1529,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1502 goto out; 1529 goto out;
1503 } 1530 }
1504 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || 1531 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
1505 R1_CURRENT_STATE(cmd.resp[0]) == 7); 1532 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
1506out: 1533out:
1507 return err; 1534 return err;
1508} 1535}
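Every core.c hunk above follows the same shape, shown here in isolation as a sketch (the function name is invented; mmc_host_clk_hold(), mmc_set_ios() and mmc_host_clk_release() are the calls the diff actually adds). Holding the clock across the ios update keeps aggressive clock gating from racing with the host driver callback.

static void mmc_set_some_ios_field(struct mmc_host *host, unsigned int width)
{
        mmc_host_clk_hold(host);        /* ungate MCI clock, take a reference */
        host->ios.bus_width = width;    /* update whichever ios field applies */
        mmc_set_ios(host);              /* push the new settings to the driver */
        mmc_host_clk_release(host);     /* drop the reference, allow gating */
}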
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index b29d3e8fd3a2..793d0a0dad8d 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
119} 119}
120 120
121/** 121/**
122 * mmc_host_clk_ungate - ungate hardware MCI clocks 122 * mmc_host_clk_hold - ungate hardware MCI clocks
123 * @host: host to ungate. 123 * @host: host to ungate.
124 * 124 *
125 * Makes sure the host ios.clock is restored to a non-zero value 125 * Makes sure the host ios.clock is restored to a non-zero value
126 * past this call. Increase clock reference count and ungate clock 126 * past this call. Increase clock reference count and ungate clock
127 * if we're the first user. 127 * if we're the first user.
128 */ 128 */
129void mmc_host_clk_ungate(struct mmc_host *host) 129void mmc_host_clk_hold(struct mmc_host *host)
130{ 130{
131 unsigned long flags; 131 unsigned long flags;
132 132
@@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
164} 164}
165 165
166/** 166/**
167 * mmc_host_clk_gate - gate off hardware MCI clocks 167 * mmc_host_clk_release - gate off hardware MCI clocks
168 * @host: host to gate. 168 * @host: host to gate.
169 * 169 *
170 * Calls the host driver with ios.clock set to zero as often as possible 170 * Calls the host driver with ios.clock set to zero as often as possible
171 * in order to gate off hardware MCI clocks. Decrease clock reference 171 * in order to gate off hardware MCI clocks. Decrease clock reference
172 * count and schedule disabling of clock. 172 * count and schedule disabling of clock.
173 */ 173 */
174void mmc_host_clk_gate(struct mmc_host *host) 174void mmc_host_clk_release(struct mmc_host *host)
175{ 175{
176 unsigned long flags; 176 unsigned long flags;
177 177
@@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)
179 host->clk_requests--; 179 host->clk_requests--;
180 if (mmc_host_may_gate_card(host->card) && 180 if (mmc_host_may_gate_card(host->card) &&
181 !host->clk_requests) 181 !host->clk_requests)
182 schedule_work(&host->clk_gate_work); 182 queue_work(system_nrt_wq, &host->clk_gate_work);
183 spin_unlock_irqrestore(&host->clk_lock, flags); 183 spin_unlock_irqrestore(&host->clk_lock, flags);
184} 184}
185 185
@@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
231 if (cancel_work_sync(&host->clk_gate_work)) 231 if (cancel_work_sync(&host->clk_gate_work))
232 mmc_host_clk_gate_delayed(host); 232 mmc_host_clk_gate_delayed(host);
233 if (host->clk_gated) 233 if (host->clk_gated)
234 mmc_host_clk_ungate(host); 234 mmc_host_clk_hold(host);
235 /* There should be only one user now */ 235 /* There should be only one user now */
236 WARN_ON(host->clk_requests > 1); 236 WARN_ON(host->clk_requests > 1);
237} 237}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index de199f911928..fb8a5cd2e4a1 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -16,16 +16,16 @@ int mmc_register_host_class(void);
16void mmc_unregister_host_class(void); 16void mmc_unregister_host_class(void);
17 17
18#ifdef CONFIG_MMC_CLKGATE 18#ifdef CONFIG_MMC_CLKGATE
19void mmc_host_clk_ungate(struct mmc_host *host); 19void mmc_host_clk_hold(struct mmc_host *host);
20void mmc_host_clk_gate(struct mmc_host *host); 20void mmc_host_clk_release(struct mmc_host *host);
21unsigned int mmc_host_clk_rate(struct mmc_host *host); 21unsigned int mmc_host_clk_rate(struct mmc_host *host);
22 22
23#else 23#else
24static inline void mmc_host_clk_ungate(struct mmc_host *host) 24static inline void mmc_host_clk_hold(struct mmc_host *host)
25{ 25{
26} 26}
27 27
28static inline void mmc_host_clk_gate(struct mmc_host *host) 28static inline void mmc_host_clk_release(struct mmc_host *host)
29{ 29{
30} 30}
31 31
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index aa7d1d79b8c5..5700b1cbdfec 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -259,7 +259,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
259 } 259 }
260 260
261 card->ext_csd.rev = ext_csd[EXT_CSD_REV]; 261 card->ext_csd.rev = ext_csd[EXT_CSD_REV];
262 if (card->ext_csd.rev > 5) { 262 if (card->ext_csd.rev > 6) {
263 printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", 263 printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
264 mmc_hostname(card->host), card->ext_csd.rev); 264 mmc_hostname(card->host), card->ext_csd.rev);
265 err = -EINVAL; 265 err = -EINVAL;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 845ce7c533b9..770c3d06f5dc 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -407,7 +407,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
407 break; 407 break;
408 if (mmc_host_is_spi(card->host)) 408 if (mmc_host_is_spi(card->host))
409 break; 409 break;
410 } while (R1_CURRENT_STATE(status) == 7); 410 } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
411 411
412 if (mmc_host_is_spi(card->host)) { 412 if (mmc_host_is_spi(card->host)) {
413 if (status & R1_SPI_ILLEGAL_COMMAND) 413 if (status & R1_SPI_ILLEGAL_COMMAND)
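The three "== R1_STATE_PRG" hunks above (mmc_test.c, core.c, mmc_ops.c) only replace a magic number. Hedged, since the macro bodies are recalled from <linux/mmc/mmc.h> rather than quoted from this patch: R1_CURRENT_STATE() extracts bits 12:9 of the R1 card status and R1_STATE_PRG is 7, the "programming" state, so the busy loops behave exactly as before. A small helper making the same test explicit:

/* Name is invented for illustration; R1_READY_FOR_DATA, R1_CURRENT_STATE
 * and R1_STATE_PRG are the existing definitions from <linux/mmc/mmc.h>. */
static inline int mmc_card_busy_programming(u32 card_status)
{
        return !(card_status & R1_READY_FOR_DATA) ||
               R1_CURRENT_STATE(card_status) == R1_STATE_PRG;
}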
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 633975ff2bb3..0370e03e3142 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
469 return 0; 469 return 0;
470} 470}
471 471
472static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) 472static void sd_update_bus_speed_mode(struct mmc_card *card)
473{ 473{
474 unsigned int bus_speed = 0, timing = 0;
475 int err;
476
477 /* 474 /*
478 * If the host doesn't support any of the UHS-I modes, fallback on 475 * If the host doesn't support any of the UHS-I modes, fallback on
479 * default speed. 476 * default speed.
480 */ 477 */
481 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 478 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
482 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) 479 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
483 return 0; 480 card->sd_bus_speed = 0;
481 return;
482 }
484 483
485 if ((card->host->caps & MMC_CAP_UHS_SDR104) && 484 if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
486 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { 485 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
487 bus_speed = UHS_SDR104_BUS_SPEED; 486 card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
488 timing = MMC_TIMING_UHS_SDR104;
489 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
490 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && 487 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
491 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { 488 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
492 bus_speed = UHS_DDR50_BUS_SPEED; 489 card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
493 timing = MMC_TIMING_UHS_DDR50;
494 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
495 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 490 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
496 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & 491 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
497 SD_MODE_UHS_SDR50)) { 492 SD_MODE_UHS_SDR50)) {
498 bus_speed = UHS_SDR50_BUS_SPEED; 493 card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
499 timing = MMC_TIMING_UHS_SDR50;
500 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
501 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 494 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
502 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && 495 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
503 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { 496 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
504 bus_speed = UHS_SDR25_BUS_SPEED; 497 card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
505 timing = MMC_TIMING_UHS_SDR25;
506 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
507 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 498 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
508 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | 499 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
509 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & 500 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
510 SD_MODE_UHS_SDR12)) { 501 SD_MODE_UHS_SDR12)) {
511 bus_speed = UHS_SDR12_BUS_SPEED; 502 card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
512 timing = MMC_TIMING_UHS_SDR12; 503 }
513 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; 504}
505
506static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
507{
508 int err;
509 unsigned int timing = 0;
510
511 switch (card->sd_bus_speed) {
512 case UHS_SDR104_BUS_SPEED:
513 timing = MMC_TIMING_UHS_SDR104;
514 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
515 break;
516 case UHS_DDR50_BUS_SPEED:
517 timing = MMC_TIMING_UHS_DDR50;
518 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
519 break;
520 case UHS_SDR50_BUS_SPEED:
521 timing = MMC_TIMING_UHS_SDR50;
522 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
523 break;
524 case UHS_SDR25_BUS_SPEED:
525 timing = MMC_TIMING_UHS_SDR25;
526 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
527 break;
528 case UHS_SDR12_BUS_SPEED:
529 timing = MMC_TIMING_UHS_SDR12;
530 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
531 break;
532 default:
533 return 0;
514 } 534 }
515 535
516 card->sd_bus_speed = bus_speed; 536 err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
517 err = mmc_sd_switch(card, 1, 0, bus_speed, status);
518 if (err) 537 if (err)
519 return err; 538 return err;
520 539
521 if ((status[16] & 0xF) != bus_speed) 540 if ((status[16] & 0xF) != card->sd_bus_speed)
522 printk(KERN_WARNING "%s: Problem setting bus speed mode!\n", 541 printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
523 mmc_hostname(card->host)); 542 mmc_hostname(card->host));
524 else { 543 else {
@@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
618 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); 637 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
619 } 638 }
620 639
640 /*
641 * Select the bus speed mode depending on host
642 * and card capability.
643 */
644 sd_update_bus_speed_mode(card);
645
621 /* Set the driver strength for the card */ 646 /* Set the driver strength for the card */
622 err = sd_select_driver_type(card, status); 647 err = sd_select_driver_type(card, status);
623 if (err) 648 if (err)
624 goto out; 649 goto out;
625 650
626 /* Set bus speed mode of the card */ 651 /* Set current limit for the card */
627 err = sd_set_bus_speed_mode(card, status); 652 err = sd_set_current_limit(card, status);
628 if (err) 653 if (err)
629 goto out; 654 goto out;
630 655
631 /* Set current limit for the card */ 656 /* Set bus speed mode of the card */
632 err = sd_set_current_limit(card, status); 657 err = sd_set_bus_speed_mode(card, status);
633 if (err) 658 if (err)
634 goto out; 659 goto out;
635 660
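The sd.c hunks above split the old sd_set_bus_speed_mode() in two and reorder mmc_sd_init_uhs_card(). The resulting sequence, sketched with error handling elided (all four functions are the real ones from the diff):

/*
 * sd_update_bus_speed_mode(card);        pick card->sd_bus_speed from
 *                                        host caps + sd3_bus_mode only
 * sd_select_driver_type(card, status);   driver strength
 * sd_set_current_limit(card, status);    current limit
 * sd_set_bus_speed_mode(card, status);   issue the switch last, setting
 *                                        timing and sw_caps.uhs_max_dtr
 */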
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 77f0b6b1681d..ff0f714b012c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -62,7 +62,7 @@ struct idmac_desc {
62 62
63 u32 des1; /* Buffer sizes */ 63 u32 des1; /* Buffer sizes */
64#define IDMAC_SET_BUFFER1_SIZE(d, s) \ 64#define IDMAC_SET_BUFFER1_SIZE(d, s) \
65 ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff)) 65 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
66 66
67 u32 des2; /* buffer 1 physical address */ 67 u32 des2; /* buffer 1 physical address */
68 68
@@ -699,7 +699,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
699 } 699 }
700 700
701 /* DDR mode set */ 701 /* DDR mode set */
702 if (ios->ddr) { 702 if (ios->timing == MMC_TIMING_UHS_DDR50) {
703 regs = mci_readl(slot->host, UHS_REG); 703 regs = mci_readl(slot->host, UHS_REG);
704 regs |= (0x1 << slot->id) << 16; 704 regs |= (0x1 << slot->id) << 16;
705 mci_writel(slot->host, UHS_REG, regs); 705 mci_writel(slot->host, UHS_REG, regs);
@@ -1646,7 +1646,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1646 mmc->caps |= MMC_CAP_4_BIT_DATA; 1646 mmc->caps |= MMC_CAP_4_BIT_DATA;
1647 1647
1648 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) 1648 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
1649 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1649 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1650 1650
1651#ifdef CONFIG_MMC_DW_IDMAC 1651#ifdef CONFIG_MMC_DW_IDMAC
1652 mmc->max_segs = host->ring_size; 1652 mmc->max_segs = host->ring_size;
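The IDMAC_SET_BUFFER1_SIZE change in the dw_mmc hunk above narrows the buffer-1 size field from 14 to 13 bits; the note below is derived purely from the two hex masks in the diff.

/*
 * old: des1 = (des1 & 0x03ffc000) | (s & 0x3fff)   size in bits 13:0
 * new: des1 = (des1 & 0x03ffe000) | (s & 0x1fff)   size in bits 12:0
 *
 * With the new mask the largest buffer a single descriptor can describe
 * is 0x1fff (8191) bytes; a size of 0x2000 under the old mask sets bit 13,
 * which the new mask reserves for the adjacent field in des1.
 */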
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index a6c329040140..2dba999caf2c 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -33,7 +33,7 @@
33 33
34#include <plat/board.h> 34#include <plat/board.h>
35#include <plat/mmc.h> 35#include <plat/mmc.h>
36#include <mach/gpio.h> 36#include <asm/gpio.h>
37#include <plat/dma.h> 37#include <plat/dma.h>
38#include <plat/mux.h> 38#include <plat/mux.h>
39#include <plat/fpga.h> 39#include <plat/fpga.h>
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 9ebfb4b482f5..4dc0028086a3 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/module.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/mmc/host.h> 21#include <linux/mmc/host.h>
21#include <linux/mmc/mmc.h> 22#include <linux/mmc/mmc.h>
@@ -27,6 +28,7 @@
27#include "sdhci-pltfm.h" 28#include "sdhci-pltfm.h"
28#include "sdhci-esdhc.h" 29#include "sdhci-esdhc.h"
29 30
31#define SDHCI_CTRL_D3CD 0x08
30/* VENDOR SPEC register */ 32/* VENDOR SPEC register */
31#define SDHCI_VENDOR_SPEC 0xC0 33#define SDHCI_VENDOR_SPEC 0xC0
32#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 34#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
@@ -141,13 +143,32 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
141 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 143 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
142 struct pltfm_imx_data *imx_data = pltfm_host->priv; 144 struct pltfm_imx_data *imx_data = pltfm_host->priv;
143 struct esdhc_platform_data *boarddata = &imx_data->boarddata; 145 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
144 146 u32 data;
145 if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) 147
146 && (boarddata->cd_type == ESDHC_CD_GPIO))) 148 if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
147 /* 149 if (boarddata->cd_type == ESDHC_CD_GPIO)
148 * these interrupts won't work with a custom card_detect gpio 150 /*
149 */ 151 * These interrupts won't work with a custom
150 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 152 * card_detect gpio (only applied to mx25/35)
153 */
154 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
155
156 if (val & SDHCI_INT_CARD_INT) {
157 /*
158 * Clear and then set D3CD bit to avoid missing the
 159 * card interrupt. This is an eSDHC controller problem,
 160 * so we apply the following workaround: clearing and
 161 * then setting the D3CD bit makes the eSDHC re-sample
 162 * the card interrupt, recovering a card interrupt that
 163 * may otherwise have been lost.
164 */
165 data = readl(host->ioaddr + SDHCI_HOST_CONTROL);
166 data &= ~SDHCI_CTRL_D3CD;
167 writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
168 data |= SDHCI_CTRL_D3CD;
169 writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
170 }
171 }
151 172
152 if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) 173 if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
153 && (reg == SDHCI_INT_STATUS) 174 && (reg == SDHCI_INT_STATUS)
@@ -217,8 +238,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
217 */ 238 */
218 return; 239 return;
219 case SDHCI_HOST_CONTROL: 240 case SDHCI_HOST_CONTROL:
220 /* FSL messed up here, so we can just keep those two */ 241 /* FSL messed up here, so we can just keep those three */
221 new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS); 242 new_val = val & (SDHCI_CTRL_LED | \
243 SDHCI_CTRL_4BITBUS | \
244 SDHCI_CTRL_D3CD);
222 /* ensure the endianess */ 245 /* ensure the endianess */
223 new_val |= ESDHC_HOST_CONTROL_LE; 246 new_val |= ESDHC_HOST_CONTROL_LE;
224 /* DMA mode bits are shifted */ 247 /* DMA mode bits are shifted */
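The D3CD workaround added to esdhc_writel_le() above can be read in isolation; here it is factored into a sketch. The helper name is invented, while the register accesses and the SDHCI_CTRL_D3CD bit (0x08) come from the diff.

static void esdhc_resample_card_int(struct sdhci_host *host)
{
        u32 data = readl(host->ioaddr + SDHCI_HOST_CONTROL);

        data &= ~SDHCI_CTRL_D3CD;       /* clear D3CD ... */
        writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
        data |= SDHCI_CTRL_D3CD;        /* ... then set it again so the eSDHC
                                         * re-samples a possibly lost card
                                         * interrupt */
        writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
}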
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 4198dbbc5c20..fc7e4a515629 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -195,7 +195,8 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
195 clk_enable(clk); 195 clk_enable(clk);
196 196
197 host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL 197 host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
198 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; 198 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
199 | SDHCI_QUIRK_32BIT_ADMA_SIZE;
199 200
200 /* enable 1/8V DDR capable */ 201 /* enable 1/8V DDR capable */
201 host->mmc->caps |= MMC_CAP_1_8V_DDR; 202 host->mmc->caps |= MMC_CAP_1_8V_DDR;
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 460ffaf0f6d7..fe886d6c474a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -19,6 +19,7 @@
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/gpio.h> 21#include <linux/gpio.h>
22#include <linux/module.h>
22 23
23#include <linux/mmc/host.h> 24#include <linux/mmc/host.h>
24 25
@@ -301,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
301 ctrl &= ~SDHCI_CTRL_8BITBUS; 302 ctrl &= ~SDHCI_CTRL_8BITBUS;
302 break; 303 break;
303 default: 304 default:
305 ctrl &= ~SDHCI_CTRL_4BITBUS;
306 ctrl &= ~SDHCI_CTRL_8BITBUS;
304 break; 307 break;
305 } 308 }
306 309
@@ -502,6 +505,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
502 /* This host supports the Auto CMD12 */ 505 /* This host supports the Auto CMD12 */
503 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; 506 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
504 507
508 /* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */
509 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC;
510
505 if (pdata->cd_type == S3C_SDHCI_CD_NONE || 511 if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
506 pdata->cd_type == S3C_SDHCI_CD_PERMANENT) 512 pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
507 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; 513 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 18b0bd31de78..21b00cefae63 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -21,7 +21,9 @@
21#include <linux/mmc/card.h> 21#include <linux/mmc/card.h>
22#include <linux/mmc/host.h> 22#include <linux/mmc/host.h>
23 23
24#include <mach/gpio.h> 24#include <asm/gpio.h>
25
26#include <mach/gpio-tegra.h>
25#include <mach/sdhci.h> 27#include <mach/sdhci.h>
26 28
27#include "sdhci-pltfm.h" 29#include "sdhci-pltfm.h"
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c31a3343340d..0e02cc1df12e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -628,12 +628,11 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
628 /* timeout in us */ 628 /* timeout in us */
629 if (!data) 629 if (!data)
630 target_timeout = cmd->cmd_timeout_ms * 1000; 630 target_timeout = cmd->cmd_timeout_ms * 1000;
631 else 631 else {
632 target_timeout = data->timeout_ns / 1000 + 632 target_timeout = data->timeout_ns / 1000;
633 data->timeout_clks / host->clock; 633 if (host->clock)
634 634 target_timeout += data->timeout_clks / host->clock;
635 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 635 }
636 host->timeout_clk = host->clock / 1000;
637 636
638 /* 637 /*
639 * Figure out needed cycles. 638 * Figure out needed cycles.
@@ -645,7 +644,6 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
645 * => 644 * =>
646 * (1) / (2) > 2^6 645 * (1) / (2) > 2^6
647 */ 646 */
648 BUG_ON(!host->timeout_clk);
649 count = 0; 647 count = 0;
650 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 648 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
651 while (current_timeout < target_timeout) { 649 while (current_timeout < target_timeout) {
@@ -1867,9 +1865,6 @@ static void sdhci_tasklet_finish(unsigned long param)
1867 1865
1868 del_timer(&host->timer); 1866 del_timer(&host->timer);
1869 1867
1870 if (host->version >= SDHCI_SPEC_300)
1871 del_timer(&host->tuning_timer);
1872
1873 mrq = host->mrq; 1868 mrq = host->mrq;
1874 1869
1875 /* 1870 /*
@@ -2461,22 +2456,6 @@ int sdhci_add_host(struct sdhci_host *host)
2461 host->max_clk = host->ops->get_max_clock(host); 2456 host->max_clk = host->ops->get_max_clock(host);
2462 } 2457 }
2463 2458
2464 host->timeout_clk =
2465 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
2466 if (host->timeout_clk == 0) {
2467 if (host->ops->get_timeout_clock) {
2468 host->timeout_clk = host->ops->get_timeout_clock(host);
2469 } else if (!(host->quirks &
2470 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
2471 printk(KERN_ERR
2472 "%s: Hardware doesn't specify timeout clock "
2473 "frequency.\n", mmc_hostname(mmc));
2474 return -ENODEV;
2475 }
2476 }
2477 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
2478 host->timeout_clk *= 1000;
2479
2480 /* 2459 /*
2481 * In case of Host Controller v3.00, find out whether clock 2460 * In case of Host Controller v3.00, find out whether clock
2482 * multiplier is supported. 2461 * multiplier is supported.
@@ -2509,10 +2488,26 @@ int sdhci_add_host(struct sdhci_host *host)
2509 } else 2488 } else
2510 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 2489 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
2511 2490
2491 host->timeout_clk =
2492 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
2493 if (host->timeout_clk == 0) {
2494 if (host->ops->get_timeout_clock) {
2495 host->timeout_clk = host->ops->get_timeout_clock(host);
2496 } else if (!(host->quirks &
2497 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
2498 printk(KERN_ERR
2499 "%s: Hardware doesn't specify timeout clock "
2500 "frequency.\n", mmc_hostname(mmc));
2501 return -ENODEV;
2502 }
2503 }
2504 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
2505 host->timeout_clk *= 1000;
2506
2512 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 2507 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
2513 mmc->max_discard_to = (1 << 27) / (mmc->f_max / 1000); 2508 host->timeout_clk = mmc->f_max / 1000;
2514 else 2509
2515 mmc->max_discard_to = (1 << 27) / host->timeout_clk; 2510 mmc->max_discard_to = (1 << 27) / host->timeout_clk;
2516 2511
2517 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; 2512 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
2518 2513
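The sdhci.c hunks above move the timeout_clk probing after f_max is known and fold the two max_discard_to branches into one. A worked example, assuming timeout_clk ends up in kHz and max_discard_to in milliseconds (consistent with the removed "(1 << 27) / (mmc->f_max / 1000)" branch):

/*
 * SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK set, f_max = 50 MHz:
 *
 *   host->timeout_clk   = 50000000 / 1000      = 50000   (kHz)
 *   mmc->max_discard_to = (1 << 27) / 50000    = 2684    (ms)
 *
 * which is exactly what the old branch computed, so the change only
 * unifies the two paths and moves them after f_max is established.
 */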
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 774f6439d7ce..0c4a672f5db6 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
120 mmc_data->hclk = clk_get_rate(priv->clk); 120 mmc_data->hclk = clk_get_rate(priv->clk);
121 mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; 121 mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
122 mmc_data->get_cd = sh_mobile_sdhi_get_cd; 122 mmc_data->get_cd = sh_mobile_sdhi_get_cd;
123 if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
124 mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
125 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; 123 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
126 if (p) { 124 if (p) {
127 mmc_data->flags = p->tmio_flags; 125 mmc_data->flags = p->tmio_flags;
126 if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
127 mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
128 mmc_data->ocr_mask = p->tmio_ocr_mask; 128 mmc_data->ocr_mask = p->tmio_ocr_mask;
129 mmc_data->capabilities |= p->tmio_caps; 129 mmc_data->capabilities |= p->tmio_caps;
130 130
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 8d185de90d20..44a9668c4b7a 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -27,7 +27,6 @@
27static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) 27static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
28{ 28{
29 const struct mfd_cell *cell = mfd_get_cell(dev); 29 const struct mfd_cell *cell = mfd_get_cell(dev);
30 struct mmc_host *mmc = platform_get_drvdata(dev);
31 int ret; 30 int ret;
32 31
33 ret = tmio_mmc_host_suspend(&dev->dev); 32 ret = tmio_mmc_host_suspend(&dev->dev);
@@ -42,7 +41,6 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
42static int tmio_mmc_resume(struct platform_device *dev) 41static int tmio_mmc_resume(struct platform_device *dev)
43{ 42{
44 const struct mfd_cell *cell = mfd_get_cell(dev); 43 const struct mfd_cell *cell = mfd_get_cell(dev);
45 struct mmc_host *mmc = platform_get_drvdata(dev);
46 int ret = 0; 44 int ret = 0;
47 45
48 /* Tell the MFD core we are ready to be enabled */ 46 /* Tell the MFD core we are ready to be enabled */
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 78017eb9318e..9e6b498c9beb 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -26,7 +26,7 @@
26#include <asm/io.h> 26#include <asm/io.h>
27#include <mach/hardware.h> 27#include <mach/hardware.h>
28#include <asm/sizes.h> 28#include <asm/sizes.h>
29#include <mach/gpio.h> 29#include <asm/gpio.h>
30#include <plat/board-ams-delta.h> 30#include <plat/board-ams-delta.h>
31 31
32/* 32/*
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index a916dec29215..6a1d6d9a2df9 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -40,7 +40,7 @@
40#include <asm/mach/flash.h> 40#include <asm/mach/flash.h>
41#include <plat/gpmc.h> 41#include <plat/gpmc.h>
42#include <plat/onenand.h> 42#include <plat/onenand.h>
43#include <mach/gpio.h> 43#include <asm/gpio.h>
44 44
45#include <plat/dma.h> 45#include <plat/dma.h>
46 46
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 65b5b76cc379..64fbb0021825 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
181 181
182#define ubi_dbg_msg(fmt, ...) do { \ 182#define ubi_dbg_msg(fmt, ...) do { \
183 if (0) \ 183 if (0) \
184 pr_debug(fmt "\n", ##__VA_ARGS__); \ 184 printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
185} while (0) 185} while (0)
186 186
187#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__) 187#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8d0314dbd946..a44874e24f2a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2535,7 +2535,7 @@ config S6GMAC
2535source "drivers/net/stmmac/Kconfig" 2535source "drivers/net/stmmac/Kconfig"
2536 2536
2537config PCH_GBE 2537config PCH_GBE
2538 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" 2538 tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
2539 depends on PCI 2539 depends on PCI
2540 select MII 2540 select MII
2541 ---help--- 2541 ---help---
@@ -2548,10 +2548,11 @@ config PCH_GBE
2548 This driver enables Gigabit Ethernet function. 2548 This driver enables Gigabit Ethernet function.
2549 2549
2550 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 2550 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
2551 Output Hub), ML7223. 2551 Output Hub), ML7223/ML7831.
2552 ML7223 IOH is for MP(Media Phone) use. 2552 ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general
2553 ML7223 is companion chip for Intel Atom E6xx series. 2553 purpose use.
 2554 ML7223 is completely compatible for Intel EG20T PCH. 2554 ML7223/ML7831 is a companion chip for Intel Atom E6xx series.
 2555 ML7223/ML7831 is completely compatible with Intel EG20T PCH.
2555 2556
2556config FTGMAC100 2557config FTGMAC100
2557 tristate "Faraday FTGMAC100 Gigabit Ethernet support" 2558 tristate "Faraday FTGMAC100 Gigabit Ethernet support"
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 52fe21e1e2cd..3b1416e3d217 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data)
308 struct net_device *dev = (struct net_device *)data; 308 struct net_device *dev = (struct net_device *)data;
309 struct dev_priv *priv = netdev_priv(dev); 309 struct dev_priv *priv = netdev_priv(dev);
310 unsigned int lnkstat, carrier; 310 unsigned int lnkstat, carrier;
311 unsigned long flags;
311 312
313 spin_lock_irqsave(&priv->chip_lock, flags);
312 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; 314 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
315 spin_unlock_irqrestore(&priv->chip_lock, flags);
313 carrier = netif_carrier_ok(dev); 316 carrier = netif_carrier_ok(dev);
314 317
315 if (lnkstat && !carrier) { 318 if (lnkstat && !carrier) {
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 29dc43523cec..48b4ec121f7f 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -35,7 +35,7 @@
35#include <asm/mach-types.h> 35#include <asm/mach-types.h>
36 36
37#include <mach/at91rm9200_emac.h> 37#include <mach/at91rm9200_emac.h>
38#include <mach/gpio.h> 38#include <asm/gpio.h>
39#include <mach/board.h> 39#include <mach/board.h>
40 40
41#include "at91_ether.h" 41#include "at91_ether.h"
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index c423504a755f..e46df5331c55 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -315,6 +315,14 @@ union db_prod {
315 u32 raw; 315 u32 raw;
316}; 316};
317 317
318/* dropless fc FW/HW related params */
319#define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512)
320#define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \
321 ETH_MAX_AGGREGATION_QUEUES_E1 :\
322 ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
323#define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
324#define FW_PREFETCH_CNT 16
325#define DROPLESS_FC_HEADROOM 100
318 326
319/* MC hsi */ 327/* MC hsi */
320#define BCM_PAGE_SHIFT 12 328#define BCM_PAGE_SHIFT 12
@@ -331,15 +339,35 @@ union db_prod {
331/* SGE ring related macros */ 339/* SGE ring related macros */
332#define NUM_RX_SGE_PAGES 2 340#define NUM_RX_SGE_PAGES 2
333#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) 341#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
334#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) 342#define NEXT_PAGE_SGE_DESC_CNT 2
343#define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
335/* RX_SGE_CNT is promised to be a power of 2 */ 344/* RX_SGE_CNT is promised to be a power of 2 */
336#define RX_SGE_MASK (RX_SGE_CNT - 1) 345#define RX_SGE_MASK (RX_SGE_CNT - 1)
337#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) 346#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
338#define MAX_RX_SGE (NUM_RX_SGE - 1) 347#define MAX_RX_SGE (NUM_RX_SGE - 1)
339#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ 348#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
340 (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) 349 (MAX_RX_SGE_CNT - 1)) ? \
350 (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
351 (x) + 1)
341#define RX_SGE(x) ((x) & MAX_RX_SGE) 352#define RX_SGE(x) ((x) & MAX_RX_SGE)
342 353
354/*
355 * Number of required SGEs is the sum of two:
356 * 1. Number of possible opened aggregations (next packet for
 357 * these aggregations will probably consume an SGE immediately)
358 * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
359 * after placement on BD for new TPA aggregation)
360 *
361 * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
362 */
363#define NUM_SGE_REQ (MAX_AGG_QS(bp) + \
364 (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
365#define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
366 MAX_RX_SGE_CNT)
367#define SGE_TH_LO(bp) (NUM_SGE_REQ + \
368 NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
369#define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
370
343/* Manipulate a bit vector defined as an array of u64 */ 371/* Manipulate a bit vector defined as an array of u64 */
344 372
345/* Number of bits in one sge_mask array element */ 373/* Number of bits in one sge_mask array element */
@@ -551,24 +579,43 @@ struct bnx2x_fastpath {
551 579
552#define NUM_TX_RINGS 16 580#define NUM_TX_RINGS 16
553#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) 581#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
554#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) 582#define NEXT_PAGE_TX_DESC_CNT 1
583#define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
555#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) 584#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
556#define MAX_TX_BD (NUM_TX_BD - 1) 585#define MAX_TX_BD (NUM_TX_BD - 1)
557#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) 586#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
558#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ 587#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
559 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) 588 (MAX_TX_DESC_CNT - 1)) ? \
589 (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
590 (x) + 1)
560#define TX_BD(x) ((x) & MAX_TX_BD) 591#define TX_BD(x) ((x) & MAX_TX_BD)
561#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) 592#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
562 593
563/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ 594/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
564#define NUM_RX_RINGS 8 595#define NUM_RX_RINGS 8
565#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) 596#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
566#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) 597#define NEXT_PAGE_RX_DESC_CNT 2
598#define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
567#define RX_DESC_MASK (RX_DESC_CNT - 1) 599#define RX_DESC_MASK (RX_DESC_CNT - 1)
568#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) 600#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
569#define MAX_RX_BD (NUM_RX_BD - 1) 601#define MAX_RX_BD (NUM_RX_BD - 1)
570#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) 602#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
571#define MIN_RX_AVAIL 128 603
604/* dropless fc calculations for BDs
605 *
 606 * Number of BDs should be the same as the number of buffers in the BRB:
607 * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
608 * "next" elements on each page
609 */
610#define NUM_BD_REQ BRB_SIZE(bp)
611#define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
612 MAX_RX_DESC_CNT)
613#define BD_TH_LO(bp) (NUM_BD_REQ + \
614 NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
615 FW_DROP_LEVEL(bp))
616#define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
617
618#define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
572 619
573#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ 620#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \
574 ETH_MIN_RX_CQES_WITH_TPA_E1 : \ 621 ETH_MIN_RX_CQES_WITH_TPA_E1 : \
@@ -579,7 +626,9 @@ struct bnx2x_fastpath {
579 MIN_RX_AVAIL)) 626 MIN_RX_AVAIL))
580 627
581#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ 628#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
582 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) 629 (MAX_RX_DESC_CNT - 1)) ? \
630 (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
631 (x) + 1)
583#define RX_BD(x) ((x) & MAX_RX_BD) 632#define RX_BD(x) ((x) & MAX_RX_BD)
584 633
585/* 634/*
@@ -589,14 +638,31 @@ struct bnx2x_fastpath {
589#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) 638#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
590#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) 639#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL)
591#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) 640#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
592#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) 641#define NEXT_PAGE_RCQ_DESC_CNT 1
642#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
593#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) 643#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
594#define MAX_RCQ_BD (NUM_RCQ_BD - 1) 644#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
595#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) 645#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
596#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ 646#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
597 (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) 647 (MAX_RCQ_DESC_CNT - 1)) ? \
648 (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
649 (x) + 1)
598#define RCQ_BD(x) ((x) & MAX_RCQ_BD) 650#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
599 651
652/* dropless fc calculations for RCQs
653 *
 654 * Number of RCQs should be the same as the number of buffers in the BRB:
655 * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
656 * "next" elements on each page
657 */
658#define NUM_RCQ_REQ BRB_SIZE(bp)
659#define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
660 MAX_RCQ_DESC_CNT)
661#define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \
662 NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
663 FW_DROP_LEVEL(bp))
664#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
665
600 666
601/* This is needed for determining of last_max */ 667/* This is needed for determining of last_max */
602#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) 668#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
@@ -685,24 +751,17 @@ struct bnx2x_fastpath {
685#define FP_CSB_FUNC_OFF \ 751#define FP_CSB_FUNC_OFF \
686 offsetof(struct cstorm_status_block_c, func) 752 offsetof(struct cstorm_status_block_c, func)
687 753
688#define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ 754#define HC_INDEX_ETH_RX_CQ_CONS 1
689 /* (HC_INDEX_U_TOE_RX_CQ_CONS) */
690#define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */
691 /* (HC_INDEX_U_ETH_RX_CQ_CONS) */
692#define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */
693 /* (HC_INDEX_U_ETH_RX_BD_CONS) */
694
695#define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */
696 /* (HC_INDEX_C_TOE_TX_CQ_CONS) */
697#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */
698 /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
699#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */
700 /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
701#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */
702 /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
703 755
704#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 756#define HC_INDEX_OOO_TX_CQ_CONS 4
705 757
758#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5
759
760#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6
761
762#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7
763
764#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
706 765
707#define BNX2X_RX_SB_INDEX \ 766#define BNX2X_RX_SB_INDEX \
708 (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) 767 (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
@@ -1100,11 +1159,12 @@ struct bnx2x {
1100#define BP_PORT(bp) (bp->pfid & 1) 1159#define BP_PORT(bp) (bp->pfid & 1)
1101#define BP_FUNC(bp) (bp->pfid) 1160#define BP_FUNC(bp) (bp->pfid)
1102#define BP_ABS_FUNC(bp) (bp->pf_num) 1161#define BP_ABS_FUNC(bp) (bp->pf_num)
1103#define BP_E1HVN(bp) (bp->pfid >> 1) 1162#define BP_VN(bp) ((bp)->pfid >> 1)
1104#define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ 1163#define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
1105#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) 1164#define BP_L_ID(bp) (BP_VN(bp) << 2)
1106#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ 1165#define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\
1107 BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) 1166 (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
1167#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
1108 1168
1109 struct net_device *dev; 1169 struct net_device *dev;
1110 struct pci_dev *pdev; 1170 struct pci_dev *pdev;
@@ -1767,7 +1827,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1767 1827
1768#define MAX_DMAE_C_PER_PORT 8 1828#define MAX_DMAE_C_PER_PORT 8
1769#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1829#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1770 BP_E1HVN(bp)) 1830 BP_VN(bp))
1771#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1831#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1772 E1HVN_MAX) 1832 E1HVN_MAX)
1773 1833
@@ -1793,7 +1853,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1793 1853
1794/* must be used on a CID before placing it on a HW ring */ 1854/* must be used on a CID before placing it on a HW ring */
1795#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ 1855#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
1796 (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ 1856 (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
1797 (x)) 1857 (x))
1798 1858
1799#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) 1859#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
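The dropless flow-control thresholds introduced at the top of this bnx2x.h diff can be made concrete with one worked example. BRB_SIZE, DROPLESS_FC_HEADROOM, NEXT_PAGE_SGE_DESC_CNT and the formulas come from the diff; the values MAX_AGG_QS(bp) = 64 and 510 usable SGEs per page are assumptions used only for illustration.

/*
 * Non-E1, non-E3 device:  BRB_SIZE = 512, MAX_AGG_QS = 64 (assumed)
 *
 *   NUM_SGE_REQ    = 64 + (512 - 64) / 2               = 288
 *   NUM_SGE_PG_REQ = (288 + 510 - 1) / 510             = 1
 *   SGE_TH_LO      = 288 + 1 * NEXT_PAGE_SGE_DESC_CNT  = 290
 *   SGE_TH_HI      = 290 + DROPLESS_FC_HEADROOM        = 390
 *
 * SGE_TH_LO/SGE_TH_HI are the low/high watermarks used for dropless flow
 * control; the BD and RCQ thresholds further down follow the same pattern
 * with BRB_SIZE buffers plus FW_DROP_LEVEL on top.
 */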
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index d724a18b5285..c4cbf9736414 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -63,8 +63,9 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
63 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0); 63 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
64 64
65#ifdef BCM_CNIC 65#ifdef BCM_CNIC
66 /* We don't want TPA on FCoE, FWD and OOO L2 rings */ 66 /* We don't want TPA on an FCoE L2 ring */
67 bnx2x_fcoe(bp, disable_tpa) = 1; 67 if (IS_FCOE_FP(fp))
68 fp->disable_tpa = 1;
68#endif 69#endif
69} 70}
70 71
@@ -986,8 +987,6 @@ void __bnx2x_link_report(struct bnx2x *bp)
986void bnx2x_init_rx_rings(struct bnx2x *bp) 987void bnx2x_init_rx_rings(struct bnx2x *bp)
987{ 988{
988 int func = BP_FUNC(bp); 989 int func = BP_FUNC(bp);
989 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
990 ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
991 u16 ring_prod; 990 u16 ring_prod;
992 int i, j; 991 int i, j;
993 992
@@ -1000,7 +999,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1000 999
1001 if (!fp->disable_tpa) { 1000 if (!fp->disable_tpa) {
1002 /* Fill the per-aggregtion pool */ 1001 /* Fill the per-aggregtion pool */
1003 for (i = 0; i < max_agg_queues; i++) { 1002 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1004 struct bnx2x_agg_info *tpa_info = 1003 struct bnx2x_agg_info *tpa_info =
1005 &fp->tpa_info[i]; 1004 &fp->tpa_info[i];
1006 struct sw_rx_bd *first_buf = 1005 struct sw_rx_bd *first_buf =
@@ -1040,7 +1039,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1040 bnx2x_free_rx_sge_range(bp, fp, 1039 bnx2x_free_rx_sge_range(bp, fp,
1041 ring_prod); 1040 ring_prod);
1042 bnx2x_free_tpa_pool(bp, fp, 1041 bnx2x_free_tpa_pool(bp, fp,
1043 max_agg_queues); 1042 MAX_AGG_QS(bp));
1044 fp->disable_tpa = 1; 1043 fp->disable_tpa = 1;
1045 ring_prod = 0; 1044 ring_prod = 0;
1046 break; 1045 break;
@@ -1136,9 +1135,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1136 bnx2x_free_rx_bds(fp); 1135 bnx2x_free_rx_bds(fp);
1137 1136
1138 if (!fp->disable_tpa) 1137 if (!fp->disable_tpa)
1139 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? 1138 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1140 ETH_MAX_AGGREGATION_QUEUES_E1 :
1141 ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
1142 } 1139 }
1143} 1140}
1144 1141
@@ -1404,10 +1401,9 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1404u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) 1401u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1405{ 1402{
1406 struct bnx2x *bp = netdev_priv(dev); 1403 struct bnx2x *bp = netdev_priv(dev);
1404
1407#ifdef BCM_CNIC 1405#ifdef BCM_CNIC
1408 if (NO_FCOE(bp)) 1406 if (!NO_FCOE(bp)) {
1409 return skb_tx_hash(dev, skb);
1410 else {
1411 struct ethhdr *hdr = (struct ethhdr *)skb->data; 1407 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1412 u16 ether_type = ntohs(hdr->h_proto); 1408 u16 ether_type = ntohs(hdr->h_proto);
1413 1409
@@ -1424,8 +1420,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1424 return bnx2x_fcoe_tx(bp, txq_index); 1420 return bnx2x_fcoe_tx(bp, txq_index);
1425 } 1421 }
1426#endif 1422#endif
1427 /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring 1423 /* select a non-FCoE queue */
1428 */
1429 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); 1424 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1430} 1425}
1431 1426
@@ -1448,6 +1443,28 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
1448 bp->num_queues += NON_ETH_CONTEXT_USE; 1443 bp->num_queues += NON_ETH_CONTEXT_USE;
1449} 1444}
1450 1445
1446/**
1447 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1448 *
1449 * @bp: Driver handle
1450 *
 1451 * We currently support at most 16 Tx queues for each CoS, thus we will
1452 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1453 * bp->max_cos.
1454 *
1455 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1456 * index after all ETH L2 indices.
1457 *
1458 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1459 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 1460 * 16..31,...) with indices that are not coupled with any real Tx queue.
1461 *
1462 * The proper configuration of skb->queue_mapping is handled by
1463 * bnx2x_select_queue() and __skb_tx_hash().
1464 *
1465 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1466 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1467 */
1451static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) 1468static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1452{ 1469{
1453 int rc, tx, rx; 1470 int rc, tx, rx;
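As the kernel-doc comment above describes, ETH Tx indices are grouped in blocks of 16 per CoS, with the tail of each block left as holes when fewer queues exist. A minimal sketch of that layout, assuming the group width from the comment; the helper and constant names are invented for the example:

enum { QUEUES_PER_COS = 16 };   /* group width described in the comment above */

static int example_tx_index(int cos, int eth_queue)
{
        /* when fewer than 16 ETH queues exist per CoS, indices
         * num_queues..15 of each group are unused "holes" */
        return cos * QUEUES_PER_COS + eth_queue;
}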
@@ -3074,15 +3091,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3074 struct bnx2x_fastpath *fp = &bp->fp[index]; 3091 struct bnx2x_fastpath *fp = &bp->fp[index];
3075 int ring_size = 0; 3092 int ring_size = 0;
3076 u8 cos; 3093 u8 cos;
3094 int rx_ring_size = 0;
3077 3095
3078 /* if rx_ring_size specified - use it */ 3096 /* if rx_ring_size specified - use it */
3079 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : 3097 if (!bp->rx_ring_size) {
3080 MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3081 3098
3082 /* allocate at least number of buffers required by FW */ 3099 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3083 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : 3100
3084 MIN_RX_SIZE_TPA, 3101 /* allocate at least number of buffers required by FW */
3085 rx_ring_size); 3102 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3103 MIN_RX_SIZE_TPA, rx_ring_size);
3104
3105 bp->rx_ring_size = rx_ring_size;
3106 } else
3107 rx_ring_size = bp->rx_ring_size;
3086 3108
3087 /* Common */ 3109 /* Common */
3088 sb = &bnx2x_fp(bp, index, status_blk); 3110 sb = &bnx2x_fp(bp, index, status_blk);
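A minimal sketch of the rx ring sizing rule introduced above: honour an explicit bp->rx_ring_size, otherwise spread the maximum available descriptors over the RX queues and clamp to the firmware minimum. The helper and its plain-int parameters are invented for the example:

static int pick_rx_ring_size(int requested, int max_avail,
                             int num_rx_queues, int min_fw)
{
        int size;

        if (requested)          /* explicit module/ethtool value wins */
                return requested;

        size = max_avail / num_rx_queues;
        return size > min_fw ? size : min_fw;   /* at least the FW minimum */
}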
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index a4ea35f6a456..0b4acf67e0c6 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -920,7 +920,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
920 920
921void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) 921void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
922{ 922{
923 if (!CHIP_IS_E1x(bp)) { 923 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
924 bp->dcb_state = dcb_on; 924 bp->dcb_state = dcb_on;
925 bp->dcbx_enabled = dcbx_enabled; 925 bp->dcbx_enabled = dcbx_enabled;
926 } else { 926 } else {
@@ -2120,6 +2120,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
2120 break; 2120 break;
2121 case DCB_CAP_ATTR_DCBX: 2121 case DCB_CAP_ATTR_DCBX:
2122 *cap = BNX2X_DCBX_CAPS; 2122 *cap = BNX2X_DCBX_CAPS;
2123 break;
2123 default: 2124 default:
2124 rval = -EINVAL; 2125 rval = -EINVAL;
2125 break; 2126 break;
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 221863059dae..cf3e47914dd7 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -363,13 +363,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
363 } 363 }
364 364
365 /* advertise the requested speed and duplex if supported */ 365 /* advertise the requested speed and duplex if supported */
366 cmd->advertising &= bp->port.supported[cfg_idx]; 366 if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
367 DP(NETIF_MSG_LINK, "Advertisement parameters "
368 "are not supported\n");
369 return -EINVAL;
370 }
367 371
368 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; 372 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
369 bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL; 373 bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
370 bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg | 374 bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
371 cmd->advertising); 375 cmd->advertising);
376 if (cmd->advertising) {
377
378 bp->link_params.speed_cap_mask[cfg_idx] = 0;
379 if (cmd->advertising & ADVERTISED_10baseT_Half) {
380 bp->link_params.speed_cap_mask[cfg_idx] |=
381 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
382 }
383 if (cmd->advertising & ADVERTISED_10baseT_Full)
384 bp->link_params.speed_cap_mask[cfg_idx] |=
385 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
372 386
387 if (cmd->advertising & ADVERTISED_100baseT_Full)
388 bp->link_params.speed_cap_mask[cfg_idx] |=
389 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
390
391 if (cmd->advertising & ADVERTISED_100baseT_Half) {
392 bp->link_params.speed_cap_mask[cfg_idx] |=
393 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
394 }
395 if (cmd->advertising & ADVERTISED_1000baseT_Half) {
396 bp->link_params.speed_cap_mask[cfg_idx] |=
397 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
398 }
399 if (cmd->advertising & (ADVERTISED_1000baseT_Full |
400 ADVERTISED_1000baseKX_Full))
401 bp->link_params.speed_cap_mask[cfg_idx] |=
402 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
403
404 if (cmd->advertising & (ADVERTISED_10000baseT_Full |
405 ADVERTISED_10000baseKX4_Full |
406 ADVERTISED_10000baseKR_Full))
407 bp->link_params.speed_cap_mask[cfg_idx] |=
408 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
409 }
373 } else { /* forced speed */ 410 } else { /* forced speed */
374 /* advertise the requested speed and duplex if supported */ 411 /* advertise the requested speed and duplex if supported */
375 switch (speed) { 412 switch (speed) {
@@ -1310,10 +1347,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
1310 if (bp->rx_ring_size) 1347 if (bp->rx_ring_size)
1311 ering->rx_pending = bp->rx_ring_size; 1348 ering->rx_pending = bp->rx_ring_size;
1312 else 1349 else
1313 if (bp->state == BNX2X_STATE_OPEN && bp->num_queues) 1350 ering->rx_pending = MAX_RX_AVAIL;
1314 ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
1315 else
1316 ering->rx_pending = MAX_RX_AVAIL;
1317 1351
1318 ering->rx_mini_pending = 0; 1352 ering->rx_mini_pending = 0;
1319 ering->rx_jumbo_pending = 0; 1353 ering->rx_jumbo_pending = 0;
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index d45b1555a602..ba15bdc5a1a9 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
778{ 778{
779 u32 nig_reg_adress_crd_weight = 0; 779 u32 nig_reg_adress_crd_weight = 0;
780 u32 pbf_reg_adress_crd_weight = 0; 780 u32 pbf_reg_adress_crd_weight = 0;
781 /* Calculate and set BW for this COS*/ 781 /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
782 const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw; 782 const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
783 const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw; 783 const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
784 784
785 switch (cos_entry) { 785 switch (cos_entry) {
786 case 0: 786 case 0:
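A minimal sketch of the weight formula used above, where a COS configured for 0% bandwidth is now credited as if it had asked for 1 instead of being rejected; min_w_val here is a hypothetical scale factor, not a hardware value:

static unsigned int cos_credit(unsigned int bw, unsigned int min_w_val,
                               unsigned int total_bw)
{
        return ((bw ? bw : 1) * min_w_val) / total_bw;
}
/* e.g. with min_w_val = 200 and total_bw = 100: bw = 25 -> 50, bw = 0 -> 2 */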
@@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw(
852 /* Calculate total BW requested */ 852 /* Calculate total BW requested */
853 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { 853 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
854 if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { 854 if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
855 855 *total_bw +=
856 if (0 == ets_params->cos[cos_idx].params.bw_params.bw) { 856 ets_params->cos[cos_idx].params.bw_params.bw;
857 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
858 "was set to 0\n");
859 return -EINVAL;
860 } 857 }
861 *total_bw +=
862 ets_params->cos[cos_idx].params.bw_params.bw;
863 }
864 } 858 }
865 859
866 /*Check taotl BW is valid */ 860 /* Check total BW is valid */
867 if ((100 != *total_bw) || (0 == *total_bw)) { 861 if ((100 != *total_bw) || (0 == *total_bw)) {
868 if (0 == *total_bw) { 862 if (0 == *total_bw) {
869 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW" 863 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW"
@@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
1726 1720
1727 /* Check loopback mode */ 1721 /* Check loopback mode */
1728 if (lb) 1722 if (lb)
1729 val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; 1723 val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
1730 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); 1724 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
1731 bnx2x_set_xumac_nig(params, 1725 bnx2x_set_xumac_nig(params,
1732 ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); 1726 ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
@@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3630 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3624 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3631 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); 3625 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
3632 3626
 3627 /* Advertise and set FEC (Forward Error Correction) */
3628 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3629 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
3630 (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
3631 MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
3632
3633 /* Enable CL37 BAM */ 3633 /* Enable CL37 BAM */
3634 if (REG_RD(bp, params->shmem_base + 3634 if (REG_RD(bp, params->shmem_base +
3635 offsetof(struct shmem_region, dev_info. 3635 offsetof(struct shmem_region, dev_info.
@@ -5924,7 +5924,7 @@ int bnx2x_set_led(struct link_params *params,
5924 (tmp | EMAC_LED_OVERRIDE)); 5924 (tmp | EMAC_LED_OVERRIDE));
5925 /* 5925 /*
5926 * return here without enabling traffic 5926 * return here without enabling traffic
5927 * LED blink andsetting rate in ON mode. 5927 * LED blink and setting rate in ON mode.
5928 * In oper mode, enabling LED blink 5928 * In oper mode, enabling LED blink
5929 * and setting rate is needed. 5929 * and setting rate is needed.
5930 */ 5930 */
@@ -5936,7 +5936,11 @@ int bnx2x_set_led(struct link_params *params,
5936 * This is a work-around for HW issue found when link 5936 * This is a work-around for HW issue found when link
5937 * is up in CL73 5937 * is up in CL73
5938 */ 5938 */
5939 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 5939 if ((!CHIP_IS_E3(bp)) ||
5940 (CHIP_IS_E3(bp) &&
5941 mode == LED_MODE_ON))
5942 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
5943
5940 if (CHIP_IS_E1x(bp) || 5944 if (CHIP_IS_E1x(bp) ||
5941 CHIP_IS_E2(bp) || 5945 CHIP_IS_E2(bp) ||
5942 (mode == LED_MODE_ON)) 5946 (mode == LED_MODE_ON))
@@ -10638,8 +10642,7 @@ static struct bnx2x_phy phy_warpcore = {
10638 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 10642 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
10639 .addr = 0xff, 10643 .addr = 0xff,
10640 .def_md_devad = 0, 10644 .def_md_devad = 0,
10641 .flags = (FLAGS_HW_LOCK_REQUIRED | 10645 .flags = FLAGS_HW_LOCK_REQUIRED,
10642 FLAGS_TX_ERROR_CHECK),
10643 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10646 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10644 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10647 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10645 .mdio_ctrl = 0, 10648 .mdio_ctrl = 0,
@@ -10765,8 +10768,7 @@ static struct bnx2x_phy phy_8706 = {
10765 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, 10768 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
10766 .addr = 0xff, 10769 .addr = 0xff,
10767 .def_md_devad = 0, 10770 .def_md_devad = 0,
10768 .flags = (FLAGS_INIT_XGXS_FIRST | 10771 .flags = FLAGS_INIT_XGXS_FIRST,
10769 FLAGS_TX_ERROR_CHECK),
10770 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10772 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10771 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10773 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10772 .mdio_ctrl = 0, 10774 .mdio_ctrl = 0,
@@ -10797,8 +10799,7 @@ static struct bnx2x_phy phy_8726 = {
10797 .addr = 0xff, 10799 .addr = 0xff,
10798 .def_md_devad = 0, 10800 .def_md_devad = 0,
10799 .flags = (FLAGS_HW_LOCK_REQUIRED | 10801 .flags = (FLAGS_HW_LOCK_REQUIRED |
10800 FLAGS_INIT_XGXS_FIRST | 10802 FLAGS_INIT_XGXS_FIRST),
10801 FLAGS_TX_ERROR_CHECK),
10802 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10803 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10803 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10804 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10804 .mdio_ctrl = 0, 10805 .mdio_ctrl = 0,
@@ -10829,8 +10830,7 @@ static struct bnx2x_phy phy_8727 = {
10829 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 10830 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
10830 .addr = 0xff, 10831 .addr = 0xff,
10831 .def_md_devad = 0, 10832 .def_md_devad = 0,
10832 .flags = (FLAGS_FAN_FAILURE_DET_REQ | 10833 .flags = FLAGS_FAN_FAILURE_DET_REQ,
10833 FLAGS_TX_ERROR_CHECK),
10834 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10834 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10835 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10835 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10836 .mdio_ctrl = 0, 10836 .mdio_ctrl = 0,
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 150709111548..15f800085bb2 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -407,8 +407,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
407 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); 407 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
408 408
409 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); 409 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
410 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | 410 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
411 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); 411 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
412 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); 412 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
413 413
414#ifdef __BIG_ENDIAN 414#ifdef __BIG_ENDIAN
@@ -1419,7 +1419,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
1419 if (!CHIP_IS_E1(bp)) { 1419 if (!CHIP_IS_E1(bp)) {
1420 /* init leading/trailing edge */ 1420 /* init leading/trailing edge */
1421 if (IS_MF(bp)) { 1421 if (IS_MF(bp)) {
1422 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1422 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1423 if (bp->port.pmf) 1423 if (bp->port.pmf)
1424 /* enable nig and gpio3 attention */ 1424 /* enable nig and gpio3 attention */
1425 val |= 0x1100; 1425 val |= 0x1100;
@@ -1471,7 +1471,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
1471 1471
1472 /* init leading/trailing edge */ 1472 /* init leading/trailing edge */
1473 if (IS_MF(bp)) { 1473 if (IS_MF(bp)) {
1474 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1474 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1475 if (bp->port.pmf) 1475 if (bp->port.pmf)
1476 /* enable nig and gpio3 attention */ 1476 /* enable nig and gpio3 attention */
1477 val |= 0x1100; 1477 val |= 0x1100;
@@ -2287,7 +2287,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2287 int vn; 2287 int vn;
2288 2288
2289 bp->vn_weight_sum = 0; 2289 bp->vn_weight_sum = 0;
2290 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2290 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2291 u32 vn_cfg = bp->mf_config[vn]; 2291 u32 vn_cfg = bp->mf_config[vn];
2292 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2292 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2293 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 2293 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
@@ -2320,12 +2320,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2320 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2320 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2321} 2321}
2322 2322
2323/* returns func by VN for current port */
2324static inline int func_by_vn(struct bnx2x *bp, int vn)
2325{
2326 return 2 * vn + BP_PORT(bp);
2327}
2328
2323static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) 2329static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
2324{ 2330{
2325 struct rate_shaping_vars_per_vn m_rs_vn; 2331 struct rate_shaping_vars_per_vn m_rs_vn;
2326 struct fairness_vars_per_vn m_fair_vn; 2332 struct fairness_vars_per_vn m_fair_vn;
2327 u32 vn_cfg = bp->mf_config[vn]; 2333 u32 vn_cfg = bp->mf_config[vn];
2328 int func = 2*vn + BP_PORT(bp); 2334 int func = func_by_vn(bp, vn);
2329 u16 vn_min_rate, vn_max_rate; 2335 u16 vn_min_rate, vn_max_rate;
2330 int i; 2336 int i;
2331 2337
@@ -2422,7 +2428,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
2422 * 2428 *
2423 * and there are 2 functions per port 2429 * and there are 2 functions per port
2424 */ 2430 */
2425 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2431 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2426 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); 2432 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2427 2433
2428 if (func >= E1H_FUNC_MAX) 2434 if (func >= E1H_FUNC_MAX)
@@ -2454,7 +2460,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2454 2460
2455 /* calculate and set min-max rate for each vn */ 2461 /* calculate and set min-max rate for each vn */
2456 if (bp->port.pmf) 2462 if (bp->port.pmf)
2457 for (vn = VN_0; vn < E1HVN_MAX; vn++) 2463 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2458 bnx2x_init_vn_minmax(bp, vn); 2464 bnx2x_init_vn_minmax(bp, vn);
2459 2465
2460 /* always enable rate shaping and fairness */ 2466 /* always enable rate shaping and fairness */
@@ -2473,16 +2479,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2473 2479
2474static inline void bnx2x_link_sync_notify(struct bnx2x *bp) 2480static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2475{ 2481{
2476 int port = BP_PORT(bp);
2477 int func; 2482 int func;
2478 int vn; 2483 int vn;
2479 2484
2480 /* Set the attention towards other drivers on the same port */ 2485 /* Set the attention towards other drivers on the same port */
2481 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2486 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2482 if (vn == BP_E1HVN(bp)) 2487 if (vn == BP_VN(bp))
2483 continue; 2488 continue;
2484 2489
2485 func = ((vn << 1) | port); 2490 func = func_by_vn(bp, vn);
2486 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + 2491 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2487 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); 2492 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2488 } 2493 }
@@ -2577,7 +2582,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
2577 bnx2x_dcbx_pmf_update(bp); 2582 bnx2x_dcbx_pmf_update(bp);
2578 2583
2579 /* enable nig attention */ 2584 /* enable nig attention */
2580 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); 2585 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2581 if (bp->common.int_block == INT_BLOCK_HC) { 2586 if (bp->common.int_block == INT_BLOCK_HC) {
2582 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 2587 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2583 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); 2588 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
@@ -2756,8 +2761,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2756 u16 tpa_agg_size = 0; 2761 u16 tpa_agg_size = 0;
2757 2762
2758 if (!fp->disable_tpa) { 2763 if (!fp->disable_tpa) {
2759 pause->sge_th_hi = 250; 2764 pause->sge_th_lo = SGE_TH_LO(bp);
2760 pause->sge_th_lo = 150; 2765 pause->sge_th_hi = SGE_TH_HI(bp);
2766
2767 /* validate SGE ring has enough to cross high threshold */
2768 WARN_ON(bp->dropless_fc &&
2769 pause->sge_th_hi + FW_PREFETCH_CNT >
2770 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
2771
2761 tpa_agg_size = min_t(u32, 2772 tpa_agg_size = min_t(u32,
2762 (min_t(u32, 8, MAX_SKB_FRAGS) * 2773 (min_t(u32, 8, MAX_SKB_FRAGS) *
2763 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); 2774 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
@@ -2771,10 +2782,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2771 2782
2772 /* pause - not for e1 */ 2783 /* pause - not for e1 */
2773 if (!CHIP_IS_E1(bp)) { 2784 if (!CHIP_IS_E1(bp)) {
2774 pause->bd_th_hi = 350; 2785 pause->bd_th_lo = BD_TH_LO(bp);
2775 pause->bd_th_lo = 250; 2786 pause->bd_th_hi = BD_TH_HI(bp);
2776 pause->rcq_th_hi = 350; 2787
2777 pause->rcq_th_lo = 250; 2788 pause->rcq_th_lo = RCQ_TH_LO(bp);
2789 pause->rcq_th_hi = RCQ_TH_HI(bp);
2790 /*
2791 * validate that rings have enough entries to cross
2792 * high thresholds
2793 */
2794 WARN_ON(bp->dropless_fc &&
2795 pause->bd_th_hi + FW_PREFETCH_CNT >
2796 bp->rx_ring_size);
2797 WARN_ON(bp->dropless_fc &&
2798 pause->rcq_th_hi + FW_PREFETCH_CNT >
2799 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
2778 2800
2779 pause->pri_map = 1; 2801 pause->pri_map = 1;
2780 } 2802 }
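A minimal sketch of the sanity rule the WARN_ON()s above enforce for dropless flow control: the high pause threshold plus the firmware prefetch window must still fit inside the ring, otherwise pause would be asserted too late. The helper is invented for the example:

static int pause_threshold_fits(unsigned int th_hi, unsigned int fw_prefetch,
                                unsigned int ring_entries)
{
        return th_hi + fw_prefetch <= ring_entries;
}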
@@ -2802,9 +2824,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2802 * For PF Clients it should be the maximum avaliable number. 2824 * For PF Clients it should be the maximum avaliable number.
2803 * VF driver(s) may want to define it to a smaller value. 2825 * VF driver(s) may want to define it to a smaller value.
2804 */ 2826 */
2805 rxq_init->max_tpa_queues = 2827 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
2806 (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
2807 ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
2808 2828
2809 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; 2829 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2810 rxq_init->fw_sb_id = fp->fw_sb_id; 2830 rxq_init->fw_sb_id = fp->fw_sb_id;
@@ -4808,6 +4828,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4808 hc_sm->time_to_expire = 0xFFFFFFFF; 4828 hc_sm->time_to_expire = 0xFFFFFFFF;
4809} 4829}
4810 4830
4831
4832/* allocates state machine ids. */
4833static inline
4834void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
4835{
4836 /* zero out state machine indices */
4837 /* rx indices */
4838 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
4839
4840 /* tx indices */
4841 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
4842 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
4843 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
4844 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
4845
4846 /* map indices */
4847 /* rx indices */
4848 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
4849 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4850
4851 /* tx indices */
4852 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
4853 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4854 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
4855 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4856 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
4857 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4858 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
4859 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
4860}
4861
4811static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, 4862static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4812 u8 vf_valid, int fw_sb_id, int igu_sb_id) 4863 u8 vf_valid, int fw_sb_id, int igu_sb_id)
4813{ 4864{
@@ -4839,6 +4890,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4839 hc_sm_p = sb_data_e2.common.state_machine; 4890 hc_sm_p = sb_data_e2.common.state_machine;
4840 sb_data_p = (u32 *)&sb_data_e2; 4891 sb_data_p = (u32 *)&sb_data_e2;
4841 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 4892 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4893 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
4842 } else { 4894 } else {
4843 memset(&sb_data_e1x, 0, 4895 memset(&sb_data_e1x, 0,
4844 sizeof(struct hc_status_block_data_e1x)); 4896 sizeof(struct hc_status_block_data_e1x));
@@ -4853,6 +4905,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4853 hc_sm_p = sb_data_e1x.common.state_machine; 4905 hc_sm_p = sb_data_e1x.common.state_machine;
4854 sb_data_p = (u32 *)&sb_data_e1x; 4906 sb_data_p = (u32 *)&sb_data_e1x;
4855 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 4907 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4908 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
4856 } 4909 }
4857 4910
4858 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], 4911 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
@@ -4890,7 +4943,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
4890 int igu_seg_id; 4943 int igu_seg_id;
4891 int port = BP_PORT(bp); 4944 int port = BP_PORT(bp);
4892 int func = BP_FUNC(bp); 4945 int func = BP_FUNC(bp);
4893 int reg_offset; 4946 int reg_offset, reg_offset_en5;
4894 u64 section; 4947 u64 section;
4895 int index; 4948 int index;
4896 struct hc_sp_status_block_data sp_sb_data; 4949 struct hc_sp_status_block_data sp_sb_data;
@@ -4913,6 +4966,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
4913 4966
4914 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4967 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4915 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 4968 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4969 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
4970 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
4916 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 4971 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4917 int sindex; 4972 int sindex;
4918 /* take care of sig[0]..sig[4] */ 4973 /* take care of sig[0]..sig[4] */
@@ -4927,7 +4982,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
4927 * and not 16 between the different groups 4982 * and not 16 between the different groups
4928 */ 4983 */
4929 bp->attn_group[index].sig[4] = REG_RD(bp, 4984 bp->attn_group[index].sig[4] = REG_RD(bp,
4930 reg_offset + 0x10 + 0x4*index); 4985 reg_offset_en5 + 0x4*index);
4931 else 4986 else
4932 bp->attn_group[index].sig[4] = 0; 4987 bp->attn_group[index].sig[4] = 0;
4933 } 4988 }
@@ -5798,6 +5853,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
5798 5853
5799 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); 5854 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
5800 5855
5856 /*
5857 * take the UNDI lock to protect undi_unload flow from accessing
5858 * registers while we're resetting the chip
5859 */
5860 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
5861
5801 bnx2x_reset_common(bp); 5862 bnx2x_reset_common(bp);
5802 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 5863 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5803 5864
@@ -5808,6 +5869,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
5808 } 5869 }
5809 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); 5870 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
5810 5871
5872 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
5873
5811 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); 5874 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
5812 5875
5813 if (!CHIP_IS_E1x(bp)) { 5876 if (!CHIP_IS_E1x(bp)) {
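A minimal sketch of the locking pattern the new comment describes, assuming the driver's bnx2x_acquire_hw_lock()/bnx2x_release_hw_lock() helpers: the RESET hw-lock brackets the chip reset so a concurrent undi_unload cannot touch registers mid-reset. Simplified, with error handling omitted:

static void example_reset_under_lock(struct bnx2x *bp)
{
        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
        bnx2x_reset_common(bp);
        /* ... bring the required blocks back out of reset ... */
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
}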
@@ -6663,12 +6726,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
6663 if (CHIP_MODE_IS_4_PORT(bp)) 6726 if (CHIP_MODE_IS_4_PORT(bp))
6664 dsb_idx = BP_FUNC(bp); 6727 dsb_idx = BP_FUNC(bp);
6665 else 6728 else
6666 dsb_idx = BP_E1HVN(bp); 6729 dsb_idx = BP_VN(bp);
6667 6730
6668 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? 6731 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
6669 IGU_BC_BASE_DSB_PROD + dsb_idx : 6732 IGU_BC_BASE_DSB_PROD + dsb_idx :
6670 IGU_NORM_BASE_DSB_PROD + dsb_idx); 6733 IGU_NORM_BASE_DSB_PROD + dsb_idx);
6671 6734
6735 /*
6736 * igu prods come in chunks of E1HVN_MAX (4) -
 6737 * it does not matter what the current chip mode is
6738 */
6672 for (i = 0; i < (num_segs * E1HVN_MAX); 6739 for (i = 0; i < (num_segs * E1HVN_MAX);
6673 i += E1HVN_MAX) { 6740 i += E1HVN_MAX) {
6674 addr = IGU_REG_PROD_CONS_MEMORY + 6741 addr = IGU_REG_PROD_CONS_MEMORY +
@@ -7560,9 +7627,12 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
7560 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 7627 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7561 u8 *mac_addr = bp->dev->dev_addr; 7628 u8 *mac_addr = bp->dev->dev_addr;
7562 u32 val; 7629 u32 val;
7630 u16 pmc;
7631
7563 /* The mac address is written to entries 1-4 to 7632 /* The mac address is written to entries 1-4 to
7564 preserve entry 0 which is used by the PMF */ 7633 * preserve entry 0 which is used by the PMF
7565 u8 entry = (BP_E1HVN(bp) + 1)*8; 7634 */
7635 u8 entry = (BP_VN(bp) + 1)*8;
7566 7636
7567 val = (mac_addr[0] << 8) | mac_addr[1]; 7637 val = (mac_addr[0] << 8) | mac_addr[1];
7568 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); 7638 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
@@ -7571,6 +7641,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
7571 (mac_addr[4] << 8) | mac_addr[5]; 7641 (mac_addr[4] << 8) | mac_addr[5];
7572 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 7642 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7573 7643
7644 /* Enable the PME and clear the status */
7645 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
7646 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
7647 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
7648
7574 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 7649 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7575 7650
7576 } else 7651 } else
@@ -8538,10 +8613,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8538 /* Check if there is any driver already loaded */ 8613 /* Check if there is any driver already loaded */
8539 val = REG_RD(bp, MISC_REG_UNPREPARED); 8614 val = REG_RD(bp, MISC_REG_UNPREPARED);
8540 if (val == 0x1) { 8615 if (val == 0x1) {
8541 /* Check if it is the UNDI driver 8616
8617 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
8618 /*
8619 * Check if it is the UNDI driver
8542 * UNDI driver initializes CID offset for normal bell to 0x7 8620 * UNDI driver initializes CID offset for normal bell to 0x7
8543 */ 8621 */
8544 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8545 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 8622 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8546 if (val == 0x7) { 8623 if (val == 0x7) {
8547 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 8624 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -8579,9 +8656,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8579 bnx2x_fw_command(bp, reset_code, 0); 8656 bnx2x_fw_command(bp, reset_code, 0);
8580 } 8657 }
8581 8658
8582 /* now it's safe to release the lock */
8583 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8584
8585 bnx2x_undi_int_disable(bp); 8659 bnx2x_undi_int_disable(bp);
8586 port = BP_PORT(bp); 8660 port = BP_PORT(bp);
8587 8661
@@ -8631,8 +8705,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8631 bp->fw_seq = 8705 bp->fw_seq =
8632 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & 8706 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
8633 DRV_MSG_SEQ_NUMBER_MASK); 8707 DRV_MSG_SEQ_NUMBER_MASK);
8634 } else 8708 }
8635 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 8709
8710 /* now it's safe to release the lock */
8711 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
8636 } 8712 }
8637} 8713}
8638 8714
@@ -8769,13 +8845,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8769static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) 8845static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
8770{ 8846{
8771 int pfid = BP_FUNC(bp); 8847 int pfid = BP_FUNC(bp);
8772 int vn = BP_E1HVN(bp);
8773 int igu_sb_id; 8848 int igu_sb_id;
8774 u32 val; 8849 u32 val;
8775 u8 fid, igu_sb_cnt = 0; 8850 u8 fid, igu_sb_cnt = 0;
8776 8851
8777 bp->igu_base_sb = 0xff; 8852 bp->igu_base_sb = 0xff;
8778 if (CHIP_INT_MODE_IS_BC(bp)) { 8853 if (CHIP_INT_MODE_IS_BC(bp)) {
8854 int vn = BP_VN(bp);
8779 igu_sb_cnt = bp->igu_sb_cnt; 8855 igu_sb_cnt = bp->igu_sb_cnt;
8780 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * 8856 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
8781 FP_SB_MAX_E1x; 8857 FP_SB_MAX_E1x;
@@ -9408,6 +9484,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9408 bp->igu_base_sb = 0; 9484 bp->igu_base_sb = 0;
9409 } else { 9485 } else {
9410 bp->common.int_block = INT_BLOCK_IGU; 9486 bp->common.int_block = INT_BLOCK_IGU;
9487
 9488 /* do not allow device reset during IGU info processing */
9489 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
9490
9411 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 9491 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9412 9492
9413 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 9493 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
@@ -9439,6 +9519,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9439 9519
9440 bnx2x_get_igu_cam_info(bp); 9520 bnx2x_get_igu_cam_info(bp);
9441 9521
9522 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
9442 } 9523 }
9443 9524
9444 /* 9525 /*
@@ -9465,7 +9546,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9465 9546
9466 bp->mf_ov = 0; 9547 bp->mf_ov = 0;
9467 bp->mf_mode = 0; 9548 bp->mf_mode = 0;
9468 vn = BP_E1HVN(bp); 9549 vn = BP_VN(bp);
9469 9550
9470 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 9551 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
9471 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", 9552 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -9585,13 +9666,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9585 /* port info */ 9666 /* port info */
9586 bnx2x_get_port_hwinfo(bp); 9667 bnx2x_get_port_hwinfo(bp);
9587 9668
9588 if (!BP_NOMCP(bp)) {
9589 bp->fw_seq =
9590 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9591 DRV_MSG_SEQ_NUMBER_MASK);
9592 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9593 }
9594
9595 /* Get MAC addresses */ 9669 /* Get MAC addresses */
9596 bnx2x_get_mac_hwinfo(bp); 9670 bnx2x_get_mac_hwinfo(bp);
9597 9671
@@ -9757,6 +9831,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9757 if (!BP_NOMCP(bp)) 9831 if (!BP_NOMCP(bp))
9758 bnx2x_undi_unload(bp); 9832 bnx2x_undi_unload(bp);
9759 9833
9834 /* init fw_seq after undi_unload! */
9835 if (!BP_NOMCP(bp)) {
9836 bp->fw_seq =
9837 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9838 DRV_MSG_SEQ_NUMBER_MASK);
9839 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9840 }
9841
9760 if (CHIP_REV_IS_FPGA(bp)) 9842 if (CHIP_REV_IS_FPGA(bp))
9761 dev_err(&bp->pdev->dev, "FPGA detected\n"); 9843 dev_err(&bp->pdev->dev, "FPGA detected\n");
9762 9844
@@ -10251,10 +10333,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10251 /* clean indirect addresses */ 10333 /* clean indirect addresses */
10252 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 10334 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10253 PCICFG_VENDOR_ID_OFFSET); 10335 PCICFG_VENDOR_ID_OFFSET);
10254 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0); 10336 /*
10255 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0); 10337 * Clean the following indirect addresses for all functions since they
10256 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); 10338 * are not used by the driver.
10257 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); 10339 */
10340 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
10341 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
10342 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
10343 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
10344
10345 if (CHIP_IS_E1x(bp)) {
10346 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
10347 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
10348 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
10349 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
10350 }
10258 10351
10259 /* 10352 /*
10260 * Enable internal target-read (in case we are probed after PF FLR). 10353 * Enable internal target-read (in case we are probed after PF FLR).
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 27b5ecb11830..fc7bd0f23c0b 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -1384,6 +1384,18 @@
1384 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ 1384 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
1385#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 1385#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
1386#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 1386#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
1387/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped
1388 * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
1389 * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
1390 * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
1391 * parity; [31-10] Reserved; */
1392#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688
1393/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped
1394 * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
1395 * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
1396 * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
1397 * parity; [31-10] Reserved; */
1398#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0
1387/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu 1399/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
1388 128 bit vector */ 1400 128 bit vector */
1389#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 1401#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000
@@ -3007,11 +3019,27 @@
3007/* [R 6] Debug only: Number of used entries in the data FIFO */ 3019/* [R 6] Debug only: Number of used entries in the data FIFO */
3008#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c 3020#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
3009/* [R 7] Debug only: Number of used entries in the header FIFO */ 3021/* [R 7] Debug only: Number of used entries in the header FIFO */
3010#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 3022#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
3011#define PXP2_REG_PGL_ADDR_88_F0 0x120534 3023#define PXP2_REG_PGL_ADDR_88_F0 0x120534
3012#define PXP2_REG_PGL_ADDR_8C_F0 0x120538 3024/* [R 32] GRC address for configuration access to PCIE config address 0x88.
3013#define PXP2_REG_PGL_ADDR_90_F0 0x12053c 3025 * any write to this PCIE address will cause a GRC write access to the
3014#define PXP2_REG_PGL_ADDR_94_F0 0x120540 3026 * address that's in t this register */
3027#define PXP2_REG_PGL_ADDR_88_F1 0x120544
3028#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
3029/* [R 32] GRC address for configuration access to PCIE config address 0x8c.
3030 * any write to this PCIE address will cause a GRC write access to the
3031 * address that's in this register */
3032#define PXP2_REG_PGL_ADDR_8C_F1 0x120548
3033#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
3034/* [R 32] GRC address for configuration access to PCIE config address 0x90.
3035 * any write to this PCIE address will cause a GRC write access to the
3036 * address that's in this register */
3037#define PXP2_REG_PGL_ADDR_90_F1 0x12054c
3038#define PXP2_REG_PGL_ADDR_94_F0 0x120540
3039/* [R 32] GRC address for configuration access to PCIE config address 0x94.
3040 * any write to this PCIE address will cause a GRC write access to the
3041 * address that's in this register */
3042#define PXP2_REG_PGL_ADDR_94_F1 0x120550
3015#define PXP2_REG_PGL_CONTROL0 0x120490 3043#define PXP2_REG_PGL_CONTROL0 0x120490
3016#define PXP2_REG_PGL_CONTROL1 0x120514 3044#define PXP2_REG_PGL_CONTROL1 0x120514
3017#define PXP2_REG_PGL_DEBUG 0x120520 3045#define PXP2_REG_PGL_DEBUG 0x120520
@@ -5304,7 +5332,7 @@
5304#define XCM_REG_XX_OVFL_EVNT_ID 0x20058 5332#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
5305#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) 5333#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0)
5306#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) 5334#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1)
5307#define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3) 5335#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2)
5308#define XMAC_CTRL_REG_RX_EN (0x1<<1) 5336#define XMAC_CTRL_REG_RX_EN (0x1<<1)
5309#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) 5337#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
5310#define XMAC_CTRL_REG_TX_EN (0x1<<0) 5338#define XMAC_CTRL_REG_TX_EN (0x1<<0)
@@ -5750,7 +5778,7 @@
5750#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 5778#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
5751#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 5779#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
5752#define HW_LOCK_RESOURCE_SPIO 2 5780#define HW_LOCK_RESOURCE_SPIO 2
5753#define HW_LOCK_RESOURCE_UNDI 5 5781#define HW_LOCK_RESOURCE_RESET 5
5754#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) 5782#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
5755#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) 5783#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
5756#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) 5784#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
@@ -6837,6 +6865,9 @@ The other bits are reserved and should be zero*/
6837#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 6865#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
6838#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 6866#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
6839#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 6867#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
6868#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
6869#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
6870#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
6840#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 6871#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96
6841#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 6872#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
6842#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e 6873#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 771f6803b238..9908f2bbcf73 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -710,7 +710,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
710 break; 710 break;
711 711
712 case MAC_TYPE_NONE: /* unreached */ 712 case MAC_TYPE_NONE: /* unreached */
713 BNX2X_ERR("stats updated by DMAE but no MAC active\n"); 713 DP(BNX2X_MSG_STATS,
714 "stats updated by DMAE but no MAC active\n");
714 return -1; 715 return -1;
715 716
716 default: /* unreached */ 717 default: /* unreached */
@@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1391 1392
1392static void bnx2x_func_stats_base_init(struct bnx2x *bp) 1393static void bnx2x_func_stats_base_init(struct bnx2x *bp)
1393{ 1394{
1394 int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX; 1395 int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
1395 u32 func_stx; 1396 u32 func_stx;
1396 1397
1397 /* sanity */ 1398 /* sanity */
@@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
1404 func_stx = bp->func_stx; 1405 func_stx = bp->func_stx;
1405 1406
1406 for (vn = VN_0; vn < vn_max; vn++) { 1407 for (vn = VN_0; vn < vn_max; vn++) {
1407 int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn; 1408 int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
1408 1409
1409 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); 1410 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
1410 bnx2x_func_stats_init(bp); 1411 bnx2x_func_stats_init(bp);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a047eb973e3b..47b928ed08f8 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2168 } 2168 }
2169 2169
2170re_arm: 2170re_arm:
2171 queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); 2171 if (!bond->kill_timers)
2172 queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
2172out: 2173out:
2173 read_unlock(&bond->lock); 2174 read_unlock(&bond->lock);
2174} 2175}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7f8b20a34ee3..d4fbd2e62616 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work)
1440 } 1440 }
1441 1441
1442re_arm: 1442re_arm:
1443 queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); 1443 if (!bond->kill_timers)
1444 queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
1444out: 1445out:
1445 read_unlock(&bond->lock); 1446 read_unlock(&bond->lock);
1446} 1447}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 38a83acd502e..6d79b78cfc75 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
777 777
778 read_lock(&bond->lock); 778 read_lock(&bond->lock);
779 779
780 if (bond->kill_timers)
781 goto out;
782
780 /* rejoin all groups on bond device */ 783 /* rejoin all groups on bond device */
781 __bond_resend_igmp_join_requests(bond->dev); 784 __bond_resend_igmp_join_requests(bond->dev);
782 785
@@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
790 __bond_resend_igmp_join_requests(vlan_dev); 793 __bond_resend_igmp_join_requests(vlan_dev);
791 } 794 }
792 795
793 if (--bond->igmp_retrans > 0) 796 if ((--bond->igmp_retrans > 0) && !bond->kill_timers)
794 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); 797 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
795 798out:
796 read_unlock(&bond->lock); 799 read_unlock(&bond->lock);
797} 800}
798 801
@@ -2538,7 +2541,7 @@ void bond_mii_monitor(struct work_struct *work)
2538 } 2541 }
2539 2542
2540re_arm: 2543re_arm:
2541 if (bond->params.miimon) 2544 if (bond->params.miimon && !bond->kill_timers)
2542 queue_delayed_work(bond->wq, &bond->mii_work, 2545 queue_delayed_work(bond->wq, &bond->mii_work,
2543 msecs_to_jiffies(bond->params.miimon)); 2546 msecs_to_jiffies(bond->params.miimon));
2544out: 2547out:
@@ -2886,7 +2889,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2886 } 2889 }
2887 2890
2888re_arm: 2891re_arm:
2889 if (bond->params.arp_interval) 2892 if (bond->params.arp_interval && !bond->kill_timers)
2890 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); 2893 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
2891out: 2894out:
2892 read_unlock(&bond->lock); 2895 read_unlock(&bond->lock);
@@ -3154,7 +3157,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
3154 bond_ab_arp_probe(bond); 3157 bond_ab_arp_probe(bond);
3155 3158
3156re_arm: 3159re_arm:
3157 if (bond->params.arp_interval) 3160 if (bond->params.arp_interval && !bond->kill_timers)
3158 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); 3161 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3159out: 3162out:
3160 read_unlock(&bond->lock); 3163 read_unlock(&bond->lock);
@@ -3419,9 +3422,27 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
3419static int bond_open(struct net_device *bond_dev) 3422static int bond_open(struct net_device *bond_dev)
3420{ 3423{
3421 struct bonding *bond = netdev_priv(bond_dev); 3424 struct bonding *bond = netdev_priv(bond_dev);
3425 struct slave *slave;
3426 int i;
3422 3427
3423 bond->kill_timers = 0; 3428 bond->kill_timers = 0;
3424 3429
3430 /* reset slave->backup and slave->inactive */
3431 read_lock(&bond->lock);
3432 if (bond->slave_cnt > 0) {
3433 read_lock(&bond->curr_slave_lock);
3434 bond_for_each_slave(bond, slave, i) {
3435 if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
3436 && (slave != bond->curr_active_slave)) {
3437 bond_set_slave_inactive_flags(slave);
3438 } else {
3439 bond_set_slave_active_flags(slave);
3440 }
3441 }
3442 read_unlock(&bond->curr_slave_lock);
3443 }
3444 read_unlock(&bond->lock);
3445
3425 INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed); 3446 INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
3426 3447
3427 if (bond_is_lb(bond)) { 3448 if (bond_is_lb(bond)) {
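The bonding hunks above converge on one re-arm pattern: a periodic worker only requeues itself while the bond is up (kill_timers == 0), so bond_close() can stop the machinery without racing a self-rescheduling work item. A minimal sketch of that pattern, assuming the struct bonding fields shown in the hunks; the function name is invented:

static void example_monitor(struct work_struct *work)
{
        struct bonding *bond = container_of(work, struct bonding,
                                            mii_work.work);

        read_lock(&bond->lock);
        if (bond->kill_timers)
                goto out;

        /* ... periodic monitoring work ... */

        if (bond->params.miimon && !bond->kill_timers)
                queue_delayed_work(bond->wq, &bond->mii_work,
                                   msecs_to_jiffies(bond->params.miimon));
out:
        read_unlock(&bond->lock);
}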
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 231385b8e08f..c7f3d4ea1167 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -408,7 +408,7 @@ static void plx_pci_del_card(struct pci_dev *pdev)
408 struct sja1000_priv *priv; 408 struct sja1000_priv *priv;
409 int i = 0; 409 int i = 0;
410 410
411 for (i = 0; i < card->channels; i++) { 411 for (i = 0; i < PLX_PCI_MAX_CHAN; i++) {
412 dev = card->net_dev[i]; 412 dev = card->net_dev[i];
413 if (!dev) 413 if (!dev)
414 continue; 414 continue;
@@ -536,7 +536,6 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
536 if (err) { 536 if (err) {
537 dev_err(&pdev->dev, "Registering device failed " 537 dev_err(&pdev->dev, "Registering device failed "
538 "(err=%d)\n", err); 538 "(err=%d)\n", err);
539 free_sja1000dev(dev);
540 goto failure_cleanup; 539 goto failure_cleanup;
541 } 540 }
542 541
@@ -549,6 +548,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
549 dev_err(&pdev->dev, "Channel #%d not detected\n", 548 dev_err(&pdev->dev, "Channel #%d not detected\n",
550 i + 1); 549 i + 1);
551 free_sja1000dev(dev); 550 free_sja1000dev(dev);
551 card->net_dev[i] = NULL;
552 } 552 }
553 } 553 }
554 554
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index f523f1cc5142..4b70b7e8bdeb 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -197,7 +197,7 @@ static void slc_bump(struct slcan *sl)
197 skb->ip_summed = CHECKSUM_UNNECESSARY; 197 skb->ip_summed = CHECKSUM_UNNECESSARY;
198 memcpy(skb_put(skb, sizeof(struct can_frame)), 198 memcpy(skb_put(skb, sizeof(struct can_frame)),
199 &cf, sizeof(struct can_frame)); 199 &cf, sizeof(struct can_frame));
200 netif_rx(skb); 200 netif_rx_ni(skb);
201 201
202 sl->dev->stats.rx_packets++; 202 sl->dev->stats.rx_packets++;
203 sl->dev->stats.rx_bytes += cf.can_dlc; 203 sl->dev->stats.rx_bytes += cf.can_dlc;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f7bbde9eb2cb..2adc294f512a 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -46,6 +46,7 @@
46#include <linux/skbuff.h> 46#include <linux/skbuff.h>
47#include <linux/platform_device.h> 47#include <linux/platform_device.h>
48#include <linux/clk.h> 48#include <linux/clk.h>
49#include <linux/io.h>
49 50
50#include <linux/can/dev.h> 51#include <linux/can/dev.h>
51#include <linux/can/error.h> 52#include <linux/can/error.h>
@@ -503,9 +504,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
503 spin_unlock_irqrestore(&priv->mbx_lock, flags); 504 spin_unlock_irqrestore(&priv->mbx_lock, flags);
504 505
505 /* Prepare mailbox for transmission */ 506 /* Prepare mailbox for transmission */
507 data = cf->can_dlc | (get_tx_head_prio(priv) << 8);
506 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ 508 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
507 data |= HECC_CANMCF_RTR; 509 data |= HECC_CANMCF_RTR;
508 data |= get_tx_head_prio(priv) << 8;
509 hecc_write_mbx(priv, mbxno, HECC_CANMCF, data); 510 hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);
510 511
511 if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ 512 if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
@@ -923,6 +924,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
923 priv->can.do_get_state = ti_hecc_get_state; 924 priv->can.do_get_state = ti_hecc_get_state;
924 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; 925 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
925 926
927 spin_lock_init(&priv->mbx_lock);
926 ndev->irq = irq->start; 928 ndev->irq = irq->start;
927 ndev->flags |= IFF_ECHO; 929 ndev->flags |= IFF_ECHO;
928 platform_set_drvdata(pdev, ndev); 930 platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 646c86bcc545..fdb7a1756409 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2452,14 +2452,13 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id)
2452 struct net_device *dev = dev_id; 2452 struct net_device *dev = dev_id;
2453 struct cas *cp = netdev_priv(dev); 2453 struct cas *cp = netdev_priv(dev);
2454 unsigned long flags; 2454 unsigned long flags;
2455 int ring; 2455 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2456 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); 2456 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2457 2457
2458 /* check for shared irq */ 2458 /* check for shared irq */
2459 if (status == 0) 2459 if (status == 0)
2460 return IRQ_NONE; 2460 return IRQ_NONE;
2461 2461
2462 ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2463 spin_lock_irqsave(&cp->lock, flags); 2462 spin_lock_irqsave(&cp->lock, flags);
2464 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2463 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2465#ifdef USE_NAPI 2464#ifdef USE_NAPI
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 805076c54f1b..da5a5d9b8aff 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1146 if (te && te->ctx && te->client && te->client->redirect) { 1146 if (te && te->ctx && te->client && te->client->redirect) {
1147 update_tcb = te->client->redirect(te->ctx, old, new, e); 1147 update_tcb = te->client->redirect(te->ctx, old, new, e);
1148 if (update_tcb) { 1148 if (update_tcb) {
1149 rcu_read_lock();
1149 l2t_hold(L2DATA(tdev), e); 1150 l2t_hold(L2DATA(tdev), e);
1151 rcu_read_unlock();
1150 set_l2t_ix(tdev, tid, e); 1152 set_l2t_ix(tdev, tid, e);
1151 } 1153 }
1152 } 1154 }
1153 } 1155 }
1154 l2t_release(L2DATA(tdev), e); 1156 l2t_release(tdev, e);
1155} 1157}
1156 1158
1157/* 1159/*
@@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
1264 goto out_free; 1266 goto out_free;
1265 1267
1266 err = -ENOMEM; 1268 err = -ENOMEM;
1267 L2DATA(dev) = t3_init_l2t(l2t_capacity); 1269 RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
1268 if (!L2DATA(dev)) 1270 if (!L2DATA(dev))
1269 goto out_free; 1271 goto out_free;
1270 1272
@@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter)
1298 1300
1299out_free_l2t: 1301out_free_l2t:
1300 t3_free_l2t(L2DATA(dev)); 1302 t3_free_l2t(L2DATA(dev));
1301 L2DATA(dev) = NULL; 1303 rcu_assign_pointer(dev->l2opt, NULL);
1302out_free: 1304out_free:
1303 kfree(t); 1305 kfree(t);
1304 return err; 1306 return err;
1305} 1307}
1306 1308
1309static void clean_l2_data(struct rcu_head *head)
1310{
1311 struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
1312 t3_free_l2t(d);
1313}
1314
1315
1307void cxgb3_offload_deactivate(struct adapter *adapter) 1316void cxgb3_offload_deactivate(struct adapter *adapter)
1308{ 1317{
1309 struct t3cdev *tdev = &adapter->tdev; 1318 struct t3cdev *tdev = &adapter->tdev;
1310 struct t3c_data *t = T3C_DATA(tdev); 1319 struct t3c_data *t = T3C_DATA(tdev);
1320 struct l2t_data *d;
1311 1321
1312 remove_adapter(adapter); 1322 remove_adapter(adapter);
1313 if (list_empty(&adapter_list)) 1323 if (list_empty(&adapter_list))
@@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
1315 1325
1316 free_tid_maps(&t->tid_maps); 1326 free_tid_maps(&t->tid_maps);
1317 T3C_DATA(tdev) = NULL; 1327 T3C_DATA(tdev) = NULL;
1318 t3_free_l2t(L2DATA(tdev)); 1328 rcu_read_lock();
1319 L2DATA(tdev) = NULL; 1329 d = L2DATA(tdev);
1330 rcu_read_unlock();
1331 rcu_assign_pointer(tdev->l2opt, NULL);
1332 call_rcu(&d->rcu_head, clean_l2_data);
1320 if (t->nofail_skb) 1333 if (t->nofail_skb)
1321 kfree_skb(t->nofail_skb); 1334 kfree_skb(t->nofail_skb);
1322 kfree(t); 1335 kfree(t);
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index f452c4003253..41540978a173 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
300struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, 300struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
301 struct net_device *dev) 301 struct net_device *dev)
302{ 302{
303 struct l2t_entry *e; 303 struct l2t_entry *e = NULL;
304 struct l2t_data *d = L2DATA(cdev); 304 struct l2t_data *d;
305 int hash;
305 u32 addr = *(u32 *) neigh->primary_key; 306 u32 addr = *(u32 *) neigh->primary_key;
306 int ifidx = neigh->dev->ifindex; 307 int ifidx = neigh->dev->ifindex;
307 int hash = arp_hash(addr, ifidx, d);
308 struct port_info *p = netdev_priv(dev); 308 struct port_info *p = netdev_priv(dev);
309 int smt_idx = p->port_id; 309 int smt_idx = p->port_id;
310 310
311 rcu_read_lock();
312 d = L2DATA(cdev);
313 if (!d)
314 goto done_rcu;
315
316 hash = arp_hash(addr, ifidx, d);
317
311 write_lock_bh(&d->lock); 318 write_lock_bh(&d->lock);
312 for (e = d->l2tab[hash].first; e; e = e->next) 319 for (e = d->l2tab[hash].first; e; e = e->next)
313 if (e->addr == addr && e->ifindex == ifidx && 320 if (e->addr == addr && e->ifindex == ifidx &&
@@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
338 } 345 }
339done: 346done:
340 write_unlock_bh(&d->lock); 347 write_unlock_bh(&d->lock);
348done_rcu:
349 rcu_read_unlock();
341 return e; 350 return e;
342} 351}
343 352
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
index 7a12d52ed4fc..c5f54796e2cb 100644
--- a/drivers/net/cxgb3/l2t.h
+++ b/drivers/net/cxgb3/l2t.h
@@ -76,6 +76,7 @@ struct l2t_data {
76 atomic_t nfree; /* number of free entries */ 76 atomic_t nfree; /* number of free entries */
77 rwlock_t lock; 77 rwlock_t lock;
78 struct l2t_entry l2tab[0]; 78 struct l2t_entry l2tab[0];
79 struct rcu_head rcu_head; /* to handle rcu cleanup */
79}; 80};
80 81
81typedef void (*arp_failure_handler_func)(struct t3cdev * dev, 82typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
@@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
99/* 100/*
100 * Getting to the L2 data from an offload device. 101 * Getting to the L2 data from an offload device.
101 */ 102 */
102#define L2DATA(dev) ((dev)->l2opt) 103#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))
103 104
104#define W_TCB_L2T_IX 0 105#define W_TCB_L2T_IX 0
105#define S_TCB_L2T_IX 7 106#define S_TCB_L2T_IX 7
@@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
126 return t3_l2t_send_slow(dev, skb, e); 127 return t3_l2t_send_slow(dev, skb, e);
127} 128}
128 129
129static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) 130static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
130{ 131{
131 if (atomic_dec_and_test(&e->refcnt)) 132 struct l2t_data *d;
133
134 rcu_read_lock();
135 d = L2DATA(t);
136
137 if (atomic_dec_and_test(&e->refcnt) && d)
132 t3_l2e_free(d, e); 138 t3_l2e_free(d, e);
139
140 rcu_read_unlock();
133} 141}
134 142
135static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) 143static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
136{ 144{
137 if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ 145 if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
138 atomic_dec(&d->nfree); 146 atomic_dec(&d->nfree);
139} 147}
140 148
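The cxgb3 hunks above convert the bare dev->l2opt pointer into an RCU-protected one: readers wrap L2DATA() in rcu_read_lock()/rcu_read_unlock(), and the teardown path unpublishes the pointer with rcu_assign_pointer() and frees the table via call_rcu() once all pre-existing readers have drained. A minimal, self-contained sketch of that lifecycle follows; the names my_dev, my_data, my_read and my_teardown are illustrative only and are not part of the driver.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_data {
	int value;
	struct rcu_head rcu_head;	/* plays the role of l2t_data.rcu_head */
};

struct my_dev {
	struct my_data __rcu *opt;	/* plays the role of t3cdev.l2opt */
};

/* Reader: dereference only inside an RCU read-side critical section. */
static int my_read(struct my_dev *dev)
{
	struct my_data *d;
	int v = -1;

	rcu_read_lock();
	d = rcu_dereference(dev->opt);
	if (d)
		v = d->value;
	rcu_read_unlock();
	return v;
}

/* Deferred free: runs only after every pre-existing reader has finished. */
static void my_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_data, rcu_head));
}

/* Updater: unpublish the pointer first, then schedule the free. */
static void my_teardown(struct my_dev *dev)
{
	struct my_data *d = rcu_dereference_protected(dev->opt, 1);

	rcu_assign_pointer(dev->opt, NULL);
	if (d)
		call_rcu(&d->rcu_head, my_free_rcu);
}

The same ordering is what cxgb3_offload_deactivate() above relies on: the pointer is cleared before call_rcu(), so no new reader can pick up the L2 table while it is being torn down.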
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index c9957b7f17b5..b4efa292fd6f 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -3712,6 +3712,9 @@ static int __devinit init_one(struct pci_dev *pdev,
3712 setup_debugfs(adapter); 3712 setup_debugfs(adapter);
3713 } 3713 }
3714 3714
3715 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
3716 pdev->needs_freset = 1;
3717
3715 if (is_offload(adapter)) 3718 if (is_offload(adapter))
3716 attach_ulds(adapter); 3719 attach_ulds(adapter);
3717 3720
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 8545c7aa93eb..a5a89ecb6f36 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
4026 checksum += eeprom_data; 4026 checksum += eeprom_data;
4027 } 4027 }
4028 4028
4029#ifdef CONFIG_PARISC
4030 /* This is a signature and not a checksum on HP c8000 */
4031 if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
4032 return E1000_SUCCESS;
4033
4034#endif
4029 if (checksum == (u16) EEPROM_SUM) 4035 if (checksum == (u16) EEPROM_SUM)
4030 return E1000_SUCCESS; 4036 return E1000_SUCCESS;
4031 else { 4037 else {
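For context on the hunk above: e1000_validate_eeprom_checksum() sums the EEPROM words 0x00..0x3F and expects the 16-bit total to equal EEPROM_SUM; on the HP c8000 the last word carries a board signature instead, so the sum test is bypassed on PARISC. A small sketch of the usual check, assuming the standard e1000 constants (0x3F and 0xBABA):

#include <linux/types.h>

#define EEPROM_CHECKSUM_REG	0x003F	/* last word covered by the checksum */
#define EEPROM_SUM		0xBABA	/* expected 16-bit sum of words 0..0x3F */

/* Returns true when the EEPROM image carries a valid checksum word. */
static bool eeprom_checksum_ok(const u16 *words)
{
	u16 sum = 0;
	int i;

	for (i = 0; i <= EEPROM_CHECKSUM_REG; i++)
		sum += words[i];	/* 16-bit wraparound is intentional */

	return sum == EEPROM_SUM;
}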
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 480f2592f8a5..536b3a55c45f 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -2085,7 +2085,8 @@ struct e1000_info e1000_82574_info = {
2085 | FLAG_HAS_AMT 2085 | FLAG_HAS_AMT
2086 | FLAG_HAS_CTRLEXT_ON_LOAD, 2086 | FLAG_HAS_CTRLEXT_ON_LOAD,
2087 .flags2 = FLAG2_CHECK_PHY_HANG 2087 .flags2 = FLAG2_CHECK_PHY_HANG
2088 | FLAG2_DISABLE_ASPM_L0S, 2088 | FLAG2_DISABLE_ASPM_L0S
2089 | FLAG2_NO_DISABLE_RX,
2089 .pba = 32, 2090 .pba = 32,
2090 .max_hw_frame_size = DEFAULT_JUMBO, 2091 .max_hw_frame_size = DEFAULT_JUMBO,
2091 .get_variants = e1000_get_variants_82571, 2092 .get_variants = e1000_get_variants_82571,
@@ -2104,7 +2105,8 @@ struct e1000_info e1000_82583_info = {
2104 | FLAG_HAS_AMT 2105 | FLAG_HAS_AMT
2105 | FLAG_HAS_JUMBO_FRAMES 2106 | FLAG_HAS_JUMBO_FRAMES
2106 | FLAG_HAS_CTRLEXT_ON_LOAD, 2107 | FLAG_HAS_CTRLEXT_ON_LOAD,
2107 .flags2 = FLAG2_DISABLE_ASPM_L0S, 2108 .flags2 = FLAG2_DISABLE_ASPM_L0S
2109 | FLAG2_NO_DISABLE_RX,
2108 .pba = 32, 2110 .pba = 32,
2109 .max_hw_frame_size = DEFAULT_JUMBO, 2111 .max_hw_frame_size = DEFAULT_JUMBO,
2110 .get_variants = e1000_get_variants_82571, 2112 .get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 638d175792cf..8533ad7f3559 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -155,6 +155,9 @@ struct e1000_info;
155#define HV_M_STATUS_SPEED_1000 0x0200 155#define HV_M_STATUS_SPEED_1000 0x0200
156#define HV_M_STATUS_LINK_UP 0x0040 156#define HV_M_STATUS_LINK_UP 0x0040
157 157
158#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
159#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
160
158/* Time to wait before putting the device into D3 if there's no link (in ms). */ 161/* Time to wait before putting the device into D3 if there's no link (in ms). */
159#define LINK_TIMEOUT 100 162#define LINK_TIMEOUT 100
160 163
@@ -453,6 +456,8 @@ struct e1000_info {
453#define FLAG2_DISABLE_ASPM_L0S (1 << 7) 456#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
454#define FLAG2_DISABLE_AIM (1 << 8) 457#define FLAG2_DISABLE_AIM (1 << 8)
455#define FLAG2_CHECK_PHY_HANG (1 << 9) 458#define FLAG2_CHECK_PHY_HANG (1 << 9)
459#define FLAG2_NO_DISABLE_RX (1 << 10)
460#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
456 461
457#define E1000_RX_DESC_PS(R, i) \ 462#define E1000_RX_DESC_PS(R, i) \
458 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 463 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 06d88f316dce..6a0526a59a8a 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1206,7 +1206,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1206 rx_ring->next_to_clean = 0; 1206 rx_ring->next_to_clean = 0;
1207 1207
1208 rctl = er32(RCTL); 1208 rctl = er32(RCTL);
1209 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1209 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
1210 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1210 ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); 1211 ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
1211 ew32(RDBAH, ((u64) rx_ring->dma >> 32)); 1212 ew32(RDBAH, ((u64) rx_ring->dma >> 32));
1212 ew32(RDLEN, rx_ring->size); 1213 ew32(RDLEN, rx_ring->size);
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 4e36978b8fd8..54add27c8f76 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -137,8 +137,9 @@
137#define HV_PM_CTRL PHY_REG(770, 17) 137#define HV_PM_CTRL PHY_REG(770, 17)
138 138
139/* PHY Low Power Idle Control */ 139/* PHY Low Power Idle Control */
140#define I82579_LPI_CTRL PHY_REG(772, 20) 140#define I82579_LPI_CTRL PHY_REG(772, 20)
141#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 141#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
142#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
142 143
143/* EMI Registers */ 144/* EMI Registers */
144#define I82579_EMI_ADDR 0x10 145#define I82579_EMI_ADDR 0x10
@@ -163,6 +164,11 @@
163#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) 164#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
164#define HV_KMRN_MDIO_SLOW 0x0400 165#define HV_KMRN_MDIO_SLOW 0x0400
165 166
167/* KMRN FIFO Control and Status */
168#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
169#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
170#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
171
166/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ 172/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
167/* Offset 04h HSFSTS */ 173/* Offset 04h HSFSTS */
168union ich8_hws_flash_status { 174union ich8_hws_flash_status {
@@ -657,6 +663,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
657 struct e1000_mac_info *mac = &hw->mac; 663 struct e1000_mac_info *mac = &hw->mac;
658 s32 ret_val; 664 s32 ret_val;
659 bool link; 665 bool link;
666 u16 phy_reg;
660 667
661 /* 668 /*
662 * We only want to go out to the PHY registers to see if Auto-Neg 669 * We only want to go out to the PHY registers to see if Auto-Neg
@@ -689,16 +696,35 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
689 696
690 mac->get_link_status = false; 697 mac->get_link_status = false;
691 698
692 if (hw->phy.type == e1000_phy_82578) { 699 switch (hw->mac.type) {
693 ret_val = e1000_link_stall_workaround_hv(hw); 700 case e1000_pch2lan:
694 if (ret_val)
695 goto out;
696 }
697
698 if (hw->mac.type == e1000_pch2lan) {
699 ret_val = e1000_k1_workaround_lv(hw); 701 ret_val = e1000_k1_workaround_lv(hw);
700 if (ret_val) 702 if (ret_val)
701 goto out; 703 goto out;
704 /* fall-thru */
705 case e1000_pchlan:
706 if (hw->phy.type == e1000_phy_82578) {
707 ret_val = e1000_link_stall_workaround_hv(hw);
708 if (ret_val)
709 goto out;
710 }
711
712 /*
713 * Workaround for PCHx parts in half-duplex:
714 * Set the number of preambles removed from the packet
715 * when it is passed from the PHY to the MAC to prevent
716 * the MAC from misinterpreting the packet type.
717 */
718 e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
719 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
720
721 if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
722 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
723
724 e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
725 break;
726 default:
727 break;
702 } 728 }
703 729
704 /* 730 /*
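The half-duplex workaround above is a read-modify-write of a three-bit register field: HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK (bits 14:12) is cleared and, when the STATUS register reports half duplex, the field is set to 1 (one preamble stripped). A minimal sketch of that mask-and-shift update, with illustrative names standing in for the driver's:

#include <linux/types.h>

#define PREAMBLE_MASK	0x7000	/* bits 14:12 of the (hypothetical) register */
#define PREAMBLE_SHIFT	12

/* Clear the field, then program a new count into it. */
static u16 set_preamble_count(u16 reg, u16 count)
{
	reg &= ~PREAMBLE_MASK;
	reg |= (count << PREAMBLE_SHIFT) & PREAMBLE_MASK;
	return reg;
}

/* e.g. half duplex: reg = set_preamble_count(reg, 1); full duplex: count 0 */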
@@ -788,6 +814,11 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
788 (adapter->hw.phy.type == e1000_phy_igp_3)) 814 (adapter->hw.phy.type == e1000_phy_igp_3))
789 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; 815 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
790 816
817 /* Enable workaround for 82579 w/ ME enabled */
818 if ((adapter->hw.mac.type == e1000_pch2lan) &&
819 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
820 adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
821
791 /* Disable EEE by default until IEEE802.3az spec is finalized */ 822 /* Disable EEE by default until IEEE802.3az spec is finalized */
792 if (adapter->flags2 & FLAG2_HAS_EEE) 823 if (adapter->flags2 & FLAG2_HAS_EEE)
793 adapter->hw.dev_spec.ich8lan.eee_disable = true; 824 adapter->hw.dev_spec.ich8lan.eee_disable = true;
@@ -1355,7 +1386,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1355 return ret_val; 1386 return ret_val;
1356 1387
1357 /* Preamble tuning for SSC */ 1388 /* Preamble tuning for SSC */
1358 ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204); 1389 ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
1359 if (ret_val) 1390 if (ret_val)
1360 return ret_val; 1391 return ret_val;
1361 } 1392 }
@@ -1645,6 +1676,7 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1645 s32 ret_val = 0; 1676 s32 ret_val = 0;
1646 u16 status_reg = 0; 1677 u16 status_reg = 0;
1647 u32 mac_reg; 1678 u32 mac_reg;
1679 u16 phy_reg;
1648 1680
1649 if (hw->mac.type != e1000_pch2lan) 1681 if (hw->mac.type != e1000_pch2lan)
1650 goto out; 1682 goto out;
@@ -1659,12 +1691,19 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1659 mac_reg = er32(FEXTNVM4); 1691 mac_reg = er32(FEXTNVM4);
1660 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; 1692 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1661 1693
1662 if (status_reg & HV_M_STATUS_SPEED_1000) 1694 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
1695 if (ret_val)
1696 goto out;
1697
1698 if (status_reg & HV_M_STATUS_SPEED_1000) {
1663 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; 1699 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1664 else 1700 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1701 } else {
1665 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; 1702 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1666 1703 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1704 }
1667 ew32(FEXTNVM4, mac_reg); 1705 ew32(FEXTNVM4, mac_reg);
1706 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
1668 } 1707 }
1669 1708
1670out: 1709out:
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 7898a67d6505..0893ab107adf 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -190,7 +190,8 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
190 /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */ 190 /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
191 if (!((nvm_data & NVM_COMPAT_LOM) || 191 if (!((nvm_data & NVM_COMPAT_LOM) ||
192 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || 192 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
193 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD))) 193 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) ||
194 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES)))
194 goto out; 195 goto out;
195 196
196 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, 197 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
@@ -200,10 +201,10 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
200 goto out; 201 goto out;
201 } 202 }
202 203
203 if (nvm_alt_mac_addr_offset == 0xFFFF) { 204 if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
205 (nvm_alt_mac_addr_offset == 0x0000))
204 /* There is no Alternate MAC Address */ 206 /* There is no Alternate MAC Address */
205 goto out; 207 goto out;
206 }
207 208
208 if (hw->bus.func == E1000_FUNC_1) 209 if (hw->bus.func == E1000_FUNC_1)
209 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; 210 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index ab4be80f7ab5..2198e615f241 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -56,7 +56,7 @@
56 56
57#define DRV_EXTRAVERSION "-k" 57#define DRV_EXTRAVERSION "-k"
58 58
59#define DRV_VERSION "1.3.16" DRV_EXTRAVERSION 59#define DRV_VERSION "1.4.4" DRV_EXTRAVERSION
60char e1000e_driver_name[] = "e1000e"; 60char e1000e_driver_name[] = "e1000e";
61const char e1000e_driver_version[] = DRV_VERSION; 61const char e1000e_driver_version[] = DRV_VERSION;
62 62
@@ -519,6 +519,63 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
519} 519}
520 520
521/** 521/**
522 * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
523 * @hw: pointer to the HW structure
524 * @tail: address of tail descriptor register
525 * @i: value to write to tail descriptor register
526 *
527 * When updating the tail register, the ME could be accessing Host CSR
528 * registers at the same time. Normally, this is handled in h/w by an
529 * arbiter but on some parts there is a bug that acknowledges Host accesses
530 * later than it should, which can leave the descriptor register with an
531 * incorrect value. Work around this by checking the FWSM register, which
532 * has bit 24 set while the ME is accessing Host CSR registers; if the bit
533 * is set, wait and retry the write a number of times.
534 **/
535static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
536 unsigned int i)
537{
538 unsigned int j = 0;
539
540 while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
541 (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
542 udelay(50);
543
544 writel(i, tail);
545
546 if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
547 return E1000_ERR_SWFW_SYNC;
548
549 return 0;
550}
551
552static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
553{
554 u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
555 struct e1000_hw *hw = &adapter->hw;
556
557 if (e1000e_update_tail_wa(hw, tail, i)) {
558 u32 rctl = er32(RCTL);
559 ew32(RCTL, rctl & ~E1000_RCTL_EN);
560 e_err("ME firmware caused invalid RDT - resetting\n");
561 schedule_work(&adapter->reset_task);
562 }
563}
564
565static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
566{
567 u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
568 struct e1000_hw *hw = &adapter->hw;
569
570 if (e1000e_update_tail_wa(hw, tail, i)) {
571 u32 tctl = er32(TCTL);
572 ew32(TCTL, tctl & ~E1000_TCTL_EN);
573 e_err("ME firmware caused invalid TDT - resetting\n");
574 schedule_work(&adapter->reset_task);
575 }
576}
577
578/**
522 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 579 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
523 * @adapter: address of board private structure 580 * @adapter: address of board private structure
524 **/ 581 **/
@@ -573,7 +630,10 @@ map_skb:
573 * such as IA-64). 630 * such as IA-64).
574 */ 631 */
575 wmb(); 632 wmb();
576 writel(i, adapter->hw.hw_addr + rx_ring->tail); 633 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
634 e1000e_update_rdt_wa(adapter, i);
635 else
636 writel(i, adapter->hw.hw_addr + rx_ring->tail);
577 } 637 }
578 i++; 638 i++;
579 if (i == rx_ring->count) 639 if (i == rx_ring->count)
@@ -673,7 +733,11 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
673 * such as IA-64). 733 * such as IA-64).
674 */ 734 */
675 wmb(); 735 wmb();
676 writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); 736 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
737 e1000e_update_rdt_wa(adapter, i << 1);
738 else
739 writel(i << 1,
740 adapter->hw.hw_addr + rx_ring->tail);
677 } 741 }
678 742
679 i++; 743 i++;
@@ -756,7 +820,10 @@ check_page:
756 * applicable for weak-ordered memory model archs, 820 * applicable for weak-ordered memory model archs,
757 * such as IA-64). */ 821 * such as IA-64). */
758 wmb(); 822 wmb();
759 writel(i, adapter->hw.hw_addr + rx_ring->tail); 823 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
824 e1000e_update_rdt_wa(adapter, i);
825 else
826 writel(i, adapter->hw.hw_addr + rx_ring->tail);
760 } 827 }
761} 828}
762 829
@@ -2915,7 +2982,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2915 2982
2916 /* disable receives while setting up the descriptors */ 2983 /* disable receives while setting up the descriptors */
2917 rctl = er32(RCTL); 2984 rctl = er32(RCTL);
2918 ew32(RCTL, rctl & ~E1000_RCTL_EN); 2985 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
2986 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2919 e1e_flush(); 2987 e1e_flush();
2920 usleep_range(10000, 20000); 2988 usleep_range(10000, 20000);
2921 2989
@@ -3394,7 +3462,8 @@ void e1000e_down(struct e1000_adapter *adapter)
3394 3462
3395 /* disable receives in the hardware */ 3463 /* disable receives in the hardware */
3396 rctl = er32(RCTL); 3464 rctl = er32(RCTL);
3397 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3465 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3466 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3398 /* flush and sleep below */ 3467 /* flush and sleep below */
3399 3468
3400 netif_stop_queue(netdev); 3469 netif_stop_queue(netdev);
@@ -3403,6 +3472,7 @@ void e1000e_down(struct e1000_adapter *adapter)
3403 tctl = er32(TCTL); 3472 tctl = er32(TCTL);
3404 tctl &= ~E1000_TCTL_EN; 3473 tctl &= ~E1000_TCTL_EN;
3405 ew32(TCTL, tctl); 3474 ew32(TCTL, tctl);
3475
3406 /* flush both disables and wait for them to finish */ 3476 /* flush both disables and wait for them to finish */
3407 e1e_flush(); 3477 e1e_flush();
3408 usleep_range(10000, 20000); 3478 usleep_range(10000, 20000);
@@ -4686,7 +4756,12 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
4686 wmb(); 4756 wmb();
4687 4757
4688 tx_ring->next_to_use = i; 4758 tx_ring->next_to_use = i;
4689 writel(i, adapter->hw.hw_addr + tx_ring->tail); 4759
4760 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
4761 e1000e_update_tdt_wa(adapter, i);
4762 else
4763 writel(i, adapter->hw.hw_addr + tx_ring->tail);
4764
4690 /* 4765 /*
4691 * we need this if more than one processor can write to our tail 4766 * we need this if more than one processor can write to our tail
4692 * at a time, it synchronizes IO on IA64/Altix systems 4767 * at a time, it synchronizes IO on IA64/Altix systems
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index e55df308a3af..6d5fbd4d4256 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5615,7 +5615,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5615 goto out_error; 5615 goto out_error;
5616 } 5616 }
5617 5617
5618 nv_vlan_mode(dev, dev->features); 5618 if (id->driver_data & DEV_HAS_VLAN)
5619 nv_vlan_mode(dev, dev->features);
5619 5620
5620 netif_carrier_off(dev); 5621 netif_carrier_off(dev);
5621 5622
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 2659daad783d..31d5c574e5a9 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2710,8 +2710,13 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2710 /* Tell the skb what kind of packet this is */ 2710 /* Tell the skb what kind of packet this is */
2711 skb->protocol = eth_type_trans(skb, dev); 2711 skb->protocol = eth_type_trans(skb, dev);
2712 2712
2713 /* Set vlan tag */ 2713 /*
2714 if (fcb->flags & RXFCB_VLN) 2714 * There's a need to check for NETIF_F_HW_VLAN_RX here:
2715 * even if vlan rx acceleration is disabled, on some chips
2716 * RXFCB_VLN is pseudo-randomly set.
2717 */
2718 if (dev->features & NETIF_F_HW_VLAN_RX &&
2719 fcb->flags & RXFCB_VLN)
2715 __vlan_hwaccel_put_tag(skb, fcb->vlctl); 2720 __vlan_hwaccel_put_tag(skb, fcb->vlctl);
2716 2721
2717 /* Send the packet up the stack */ 2722 /* Send the packet up the stack */
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 6e350692d118..0caf3c323ec0 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -686,10 +686,21 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
686{ 686{
687 unsigned int last_rule_idx = priv->cur_filer_idx; 687 unsigned int last_rule_idx = priv->cur_filer_idx;
688 unsigned int cmp_rqfpr; 688 unsigned int cmp_rqfpr;
689 unsigned int local_rqfpr[MAX_FILER_IDX + 1]; 689 unsigned int *local_rqfpr;
690 unsigned int local_rqfcr[MAX_FILER_IDX + 1]; 690 unsigned int *local_rqfcr;
691 int i = 0x0, k = 0x0; 691 int i = 0x0, k = 0x0;
692 int j = MAX_FILER_IDX, l = 0x0; 692 int j = MAX_FILER_IDX, l = 0x0;
693 int ret = 1;
694
695 local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
696 GFP_KERNEL);
697 local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
698 GFP_KERNEL);
699 if (!local_rqfpr || !local_rqfcr) {
700 pr_err("Out of memory\n");
701 ret = 0;
702 goto err;
703 }
693 704
694 switch (class) { 705 switch (class) {
695 case TCP_V4_FLOW: 706 case TCP_V4_FLOW:
@@ -706,7 +717,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
706 break; 717 break;
707 default: 718 default:
708 pr_err("Right now this class is not supported\n"); 719 pr_err("Right now this class is not supported\n");
709 return 0; 720 ret = 0;
721 goto err;
710 } 722 }
711 723
712 for (i = 0; i < MAX_FILER_IDX + 1; i++) { 724 for (i = 0; i < MAX_FILER_IDX + 1; i++) {
@@ -721,7 +733,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
721 733
722 if (i == MAX_FILER_IDX + 1) { 734 if (i == MAX_FILER_IDX + 1) {
723 pr_err("No parse rule found, can't create hash rules\n"); 735 pr_err("No parse rule found, can't create hash rules\n");
724 return 0; 736 ret = 0;
737 goto err;
725 } 738 }
726 739
727 /* If a match was found, then it begins the starting of a cluster rule 740 /* If a match was found, then it begins the starting of a cluster rule
@@ -765,7 +778,10 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
765 priv->cur_filer_idx = priv->cur_filer_idx - 1; 778 priv->cur_filer_idx = priv->cur_filer_idx - 1;
766 } 779 }
767 780
768 return 1; 781err:
782 kfree(local_rqfcr);
783 kfree(local_rqfpr);
784 return ret;
769} 785}
770 786
771static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) 787static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
@@ -1653,10 +1669,10 @@ static int gfar_get_cls_all(struct gfar_private *priv,
1653 u32 i = 0; 1669 u32 i = 0;
1654 1670
1655 list_for_each_entry(comp, &priv->rx_list.list, list) { 1671 list_for_each_entry(comp, &priv->rx_list.list, list) {
1656 if (i <= cmd->rule_cnt) { 1672 if (i == cmd->rule_cnt)
1657 rule_locs[i] = comp->fs.location; 1673 return -EMSGSIZE;
1658 i++; 1674 rule_locs[i] = comp->fs.location;
1659 } 1675 i++;
1660 } 1676 }
1661 1677
1662 cmd->data = MAX_FILER_IDX; 1678 cmd->data = MAX_FILER_IDX;
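The first gianfar_ethtool.c hunk above moves two MAX_FILER_IDX-sized arrays off the kernel stack onto the heap and routes every exit through one err label so both buffers are always released. A minimal sketch of that pattern; do_work and NENTRIES are illustrative names, not the driver's:

#include <linux/slab.h>

#define NENTRIES 256	/* stands in for MAX_FILER_IDX + 1 */

static int do_work(void)
{
	unsigned int *a, *b;
	int ret = 1;	/* the driver's convention: 1 on success, 0 on failure */

	a = kmalloc(sizeof(unsigned int) * NENTRIES, GFP_KERNEL);
	b = kmalloc(sizeof(unsigned int) * NENTRIES, GFP_KERNEL);
	if (!a || !b) {
		ret = 0;
		goto err;
	}

	/* ... fill and consume a[] and b[] ... */

err:
	kfree(b);	/* kfree(NULL) is a no-op, so a partial failure is safe */
	kfree(a);
	return ret;
}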
diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/gianfar_ptp.c
index 1c97861596f0..f67b8aebc89c 100644
--- a/drivers/net/gianfar_ptp.c
+++ b/drivers/net/gianfar_ptp.c
@@ -193,14 +193,9 @@ static void set_alarm(struct etsects *etsects)
193/* Caller must hold etsects->lock. */ 193/* Caller must hold etsects->lock. */
194static void set_fipers(struct etsects *etsects) 194static void set_fipers(struct etsects *etsects)
195{ 195{
196 u32 tmr_ctrl = gfar_read(&etsects->regs->tmr_ctrl); 196 set_alarm(etsects);
197
198 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl & (~TE));
199 gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
200 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); 197 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
201 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); 198 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
202 set_alarm(etsects);
203 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|TE);
204} 199}
205 200
206/* 201/*
@@ -511,7 +506,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
511 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); 506 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
512 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); 507 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
513 set_alarm(etsects); 508 set_alarm(etsects);
514 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE); 509 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD);
515 510
516 spin_unlock_irqrestore(&etsects->lock, flags); 511 spin_unlock_irqrestore(&etsects->lock, flags);
517 512
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 16ce45c11934..52a39000c42c 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -428,6 +428,7 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
428 dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); 428 dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
429 429
430 status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); 430 status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
431 greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
431 432
432 /* Wrap around descriptor ring */ 433 /* Wrap around descriptor ring */
433 if (greth->tx_next == GRETH_TXBD_NUM_MASK) { 434 if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -490,7 +491,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
490 if (nr_frags != 0) 491 if (nr_frags != 0)
491 status = GRETH_TXBD_MORE; 492 status = GRETH_TXBD_MORE;
492 493
493 status |= GRETH_TXBD_CSALL; 494 if (skb->ip_summed == CHECKSUM_PARTIAL)
495 status |= GRETH_TXBD_CSALL;
494 status |= skb_headlen(skb) & GRETH_BD_LEN; 496 status |= skb_headlen(skb) & GRETH_BD_LEN;
495 if (greth->tx_next == GRETH_TXBD_NUM_MASK) 497 if (greth->tx_next == GRETH_TXBD_NUM_MASK)
496 status |= GRETH_BD_WR; 498 status |= GRETH_BD_WR;
@@ -513,7 +515,9 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
513 greth->tx_skbuff[curr_tx] = NULL; 515 greth->tx_skbuff[curr_tx] = NULL;
514 bdp = greth->tx_bd_base + curr_tx; 516 bdp = greth->tx_bd_base + curr_tx;
515 517
516 status = GRETH_TXBD_CSALL | GRETH_BD_EN; 518 status = GRETH_BD_EN;
519 if (skb->ip_summed == CHECKSUM_PARTIAL)
520 status |= GRETH_TXBD_CSALL;
517 status |= frag->size & GRETH_BD_LEN; 521 status |= frag->size & GRETH_BD_LEN;
518 522
519 /* Wrap around descriptor ring */ 523 /* Wrap around descriptor ring */
@@ -641,6 +645,7 @@ static void greth_clean_tx(struct net_device *dev)
641 dev->stats.tx_fifo_errors++; 645 dev->stats.tx_fifo_errors++;
642 } 646 }
643 dev->stats.tx_packets++; 647 dev->stats.tx_packets++;
648 dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
644 greth->tx_last = NEXT_TX(greth->tx_last); 649 greth->tx_last = NEXT_TX(greth->tx_last);
645 greth->tx_free++; 650 greth->tx_free++;
646 } 651 }
@@ -695,6 +700,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)
695 greth->tx_skbuff[greth->tx_last] = NULL; 700 greth->tx_skbuff[greth->tx_last] = NULL;
696 701
697 greth_update_tx_stats(dev, stat); 702 greth_update_tx_stats(dev, stat);
703 dev->stats.tx_bytes += skb->len;
698 704
699 bdp = greth->tx_bd_base + greth->tx_last; 705 bdp = greth->tx_bd_base + greth->tx_last;
700 706
@@ -796,6 +802,7 @@ static int greth_rx(struct net_device *dev, int limit)
796 memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len); 802 memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
797 803
798 skb->protocol = eth_type_trans(skb, dev); 804 skb->protocol = eth_type_trans(skb, dev);
805 dev->stats.rx_bytes += pkt_len;
799 dev->stats.rx_packets++; 806 dev->stats.rx_packets++;
800 netif_receive_skb(skb); 807 netif_receive_skb(skb);
801 } 808 }
@@ -910,6 +917,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
910 917
911 skb->protocol = eth_type_trans(skb, dev); 918 skb->protocol = eth_type_trans(skb, dev);
912 dev->stats.rx_packets++; 919 dev->stats.rx_packets++;
920 dev->stats.rx_bytes += pkt_len;
913 netif_receive_skb(skb); 921 netif_receive_skb(skb);
914 922
915 greth->rx_skbuff[greth->rx_cur] = newskb; 923 greth->rx_skbuff[greth->rx_cur] = newskb;
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index 9a0040dee4da..232a622a85b7 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -103,6 +103,7 @@ struct greth_private {
103 103
104 unsigned char *tx_bufs[GRETH_TXBD_NUM]; 104 unsigned char *tx_bufs[GRETH_TXBD_NUM];
105 unsigned char *rx_bufs[GRETH_RXBD_NUM]; 105 unsigned char *rx_bufs[GRETH_RXBD_NUM];
106 u16 tx_bufs_length[GRETH_TXBD_NUM];
106 107
107 u16 tx_next; 108 u16 tx_next;
108 u16 tx_last; 109 u16 tx_last;
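The greth changes account tx_bytes at completion time. In the non-gigabit path the skb is copied into a driver-owned buffer and freed right away, so its length must be remembered per descriptor; the new tx_bufs_length[] array holds it until greth_clean_tx() runs. A rough sketch of that bookkeeping with made-up names (my_priv and MY_TXBD_NUM are not the driver's):

#include <linux/netdevice.h>

#define MY_TXBD_NUM 128			/* illustrative ring size */

struct my_priv {
	u16 tx_len[MY_TXBD_NUM];	/* byte count remembered per descriptor */
	unsigned int tx_next, tx_last;
	struct net_device *ndev;
};

/* At transmit time: record the length before the skb goes away. */
static void my_record_xmit(struct my_priv *p, unsigned int len)
{
	p->tx_len[p->tx_next] = len;
	p->tx_next = (p->tx_next + 1) % MY_TXBD_NUM;
}

/* At completion time: charge the remembered length to the statistics. */
static void my_tx_complete(struct my_priv *p)
{
	p->ndev->stats.tx_packets++;
	p->ndev->stats.tx_bytes += p->tx_len[p->tx_last];
	p->tx_last = (p->tx_last + 1) % MY_TXBD_NUM;
}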
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index ba99af05bf62..d393f1e764ed 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -395,7 +395,7 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
395} 395}
396 396
397/* recycle the current buffer on the rx queue */ 397/* recycle the current buffer on the rx queue */
398static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) 398static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
399{ 399{
400 u32 q_index = adapter->rx_queue.index; 400 u32 q_index = adapter->rx_queue.index;
401 u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator; 401 u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
@@ -403,6 +403,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
403 unsigned int index = correlator & 0xffffffffUL; 403 unsigned int index = correlator & 0xffffffffUL;
404 union ibmveth_buf_desc desc; 404 union ibmveth_buf_desc desc;
405 unsigned long lpar_rc; 405 unsigned long lpar_rc;
406 int ret = 1;
406 407
407 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); 408 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
408 BUG_ON(index >= adapter->rx_buff_pool[pool].size); 409 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
@@ -410,7 +411,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
410 if (!adapter->rx_buff_pool[pool].active) { 411 if (!adapter->rx_buff_pool[pool].active) {
411 ibmveth_rxq_harvest_buffer(adapter); 412 ibmveth_rxq_harvest_buffer(adapter);
412 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); 413 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
413 return; 414 goto out;
414 } 415 }
415 416
416 desc.fields.flags_len = IBMVETH_BUF_VALID | 417 desc.fields.flags_len = IBMVETH_BUF_VALID |
@@ -423,12 +424,16 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
423 netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed " 424 netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
424 "during recycle rc=%ld", lpar_rc); 425 "during recycle rc=%ld", lpar_rc);
425 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); 426 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
427 ret = 0;
426 } 428 }
427 429
428 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { 430 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
429 adapter->rx_queue.index = 0; 431 adapter->rx_queue.index = 0;
430 adapter->rx_queue.toggle = !adapter->rx_queue.toggle; 432 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
431 } 433 }
434
435out:
436 return ret;
432} 437}
433 438
434static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) 439static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
@@ -631,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev)
631 netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", 636 netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
632 netdev->irq, rc); 637 netdev->irq, rc);
633 do { 638 do {
634 rc = h_free_logical_lan(adapter->vdev->unit_address); 639 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
635 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); 640 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
636 641
637 goto err_out; 642 goto err_out;
638 } 643 }
@@ -752,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
752 struct ibmveth_adapter *adapter = netdev_priv(dev); 757 struct ibmveth_adapter *adapter = netdev_priv(dev);
753 unsigned long set_attr, clr_attr, ret_attr; 758 unsigned long set_attr, clr_attr, ret_attr;
754 unsigned long set_attr6, clr_attr6; 759 unsigned long set_attr6, clr_attr6;
755 long ret, ret6; 760 long ret, ret4, ret6;
756 int rc1 = 0, rc2 = 0; 761 int rc1 = 0, rc2 = 0;
757 int restart = 0; 762 int restart = 0;
758 763
@@ -765,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
765 770
766 set_attr = 0; 771 set_attr = 0;
767 clr_attr = 0; 772 clr_attr = 0;
773 set_attr6 = 0;
774 clr_attr6 = 0;
768 775
769 if (data) { 776 if (data) {
770 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; 777 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
@@ -779,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
779 if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && 786 if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
780 !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && 787 !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
781 (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { 788 (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
782 ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr, 789 ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
783 set_attr, &ret_attr); 790 set_attr, &ret_attr);
784 791
785 if (ret != H_SUCCESS) { 792 if (ret4 != H_SUCCESS) {
786 netdev_err(dev, "unable to change IPv4 checksum " 793 netdev_err(dev, "unable to change IPv4 checksum "
787 "offload settings. %d rc=%ld\n", 794 "offload settings. %d rc=%ld\n",
788 data, ret); 795 data, ret4);
796
797 h_illan_attributes(adapter->vdev->unit_address,
798 set_attr, clr_attr, &ret_attr);
799
800 if (data == 1)
801 dev->features &= ~NETIF_F_IP_CSUM;
789 802
790 ret = h_illan_attributes(adapter->vdev->unit_address,
791 set_attr, clr_attr, &ret_attr);
792 } else { 803 } else {
793 adapter->fw_ipv4_csum_support = data; 804 adapter->fw_ipv4_csum_support = data;
794 } 805 }
@@ -799,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
799 if (ret6 != H_SUCCESS) { 810 if (ret6 != H_SUCCESS) {
800 netdev_err(dev, "unable to change IPv6 checksum " 811 netdev_err(dev, "unable to change IPv6 checksum "
801 "offload settings. %d rc=%ld\n", 812 "offload settings. %d rc=%ld\n",
802 data, ret); 813 data, ret6);
814
815 h_illan_attributes(adapter->vdev->unit_address,
816 set_attr6, clr_attr6, &ret_attr);
817
818 if (data == 1)
819 dev->features &= ~NETIF_F_IPV6_CSUM;
803 820
804 ret = h_illan_attributes(adapter->vdev->unit_address,
805 set_attr6, clr_attr6,
806 &ret_attr);
807 } else 821 } else
808 adapter->fw_ipv6_csum_support = data; 822 adapter->fw_ipv6_csum_support = data;
809 823
810 if (ret != H_SUCCESS || ret6 != H_SUCCESS) 824 if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
811 adapter->rx_csum = data; 825 adapter->rx_csum = data;
812 else 826 else
813 rc1 = -EIO; 827 rc1 = -EIO;
@@ -925,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
925 union ibmveth_buf_desc descs[6]; 939 union ibmveth_buf_desc descs[6];
926 int last, i; 940 int last, i;
927 int force_bounce = 0; 941 int force_bounce = 0;
942 dma_addr_t dma_addr;
928 943
929 /* 944 /*
930 * veth handles a maximum of 6 segments including the header, so 945 * veth handles a maximum of 6 segments including the header, so
@@ -989,17 +1004,16 @@ retry_bounce:
989 } 1004 }
990 1005
991 /* Map the header */ 1006 /* Map the header */
992 descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, 1007 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
993 skb_headlen(skb), 1008 skb_headlen(skb), DMA_TO_DEVICE);
994 DMA_TO_DEVICE); 1009 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
995 if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
996 goto map_failed; 1010 goto map_failed;
997 1011
998 descs[0].fields.flags_len = desc_flags | skb_headlen(skb); 1012 descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
1013 descs[0].fields.address = dma_addr;
999 1014
1000 /* Map the frags */ 1015 /* Map the frags */
1001 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1016 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1002 unsigned long dma_addr;
1003 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1017 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1004 1018
1005 dma_addr = dma_map_page(&adapter->vdev->dev, frag->page, 1019 dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
@@ -1021,7 +1035,12 @@ retry_bounce:
1021 netdev->stats.tx_bytes += skb->len; 1035 netdev->stats.tx_bytes += skb->len;
1022 } 1036 }
1023 1037
1024 for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++) 1038 dma_unmap_single(&adapter->vdev->dev,
1039 descs[0].fields.address,
1040 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1041 DMA_TO_DEVICE);
1042
1043 for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
1025 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, 1044 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
1026 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, 1045 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1027 DMA_TO_DEVICE); 1046 DMA_TO_DEVICE);
@@ -1084,8 +1103,9 @@ restart_poll:
1084 if (rx_flush) 1103 if (rx_flush)
1085 ibmveth_flush_buffer(skb->data, 1104 ibmveth_flush_buffer(skb->data,
1086 length + offset); 1105 length + offset);
1106 if (!ibmveth_rxq_recycle_buffer(adapter))
1107 kfree_skb(skb);
1087 skb = new_skb; 1108 skb = new_skb;
1088 ibmveth_rxq_recycle_buffer(adapter);
1089 } else { 1109 } else {
1090 ibmveth_rxq_harvest_buffer(adapter); 1110 ibmveth_rxq_harvest_buffer(adapter);
1091 skb_reserve(skb, offset); 1111 skb_reserve(skb, offset);
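The ibmveth_start_xmit() hunks above fix an unmap mismatch: the header is mapped with dma_map_single() but used to be unmapped with dma_unmap_page() together with the fragments, and the header mapping was not checked for errors before use. A minimal sketch of the matched map/check/unmap pattern follows; my_map_skb and the local bookkeeping are illustrative, and the skb_frag_t field names (page, page_offset, size) are the ones this kernel generation uses:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int my_map_skb(struct device *dev, struct sk_buff *skb)
{
	dma_addr_t head, frag_addr[MAX_SKB_FRAGS];
	int i, mapped = 0, ret = 0;
	int nfrags = skb_shinfo(skb)->nr_frags;

	/* Header: dma_map_single() must be paired with dma_unmap_single(). */
	head = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, head))
		return -EIO;

	/* Fragments: dma_map_page() pairs with dma_unmap_page(). */
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		frag_addr[i] = dma_map_page(dev, frag->page, frag->page_offset,
					    frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, frag_addr[i])) {
			ret = -EIO;
			break;
		}
		mapped++;
	}

	/* ... on success the descriptors would be handed to the hardware ... */

	/* Tear down each mapping with its matching unmap call. */
	for (i = 0; i < mapped; i++)
		dma_unmap_page(dev, frag_addr[i],
			       skb_shinfo(skb)->frags[i].size, DMA_TO_DEVICE);
	dma_unmap_single(dev, head, skb_headlen(skb), DMA_TO_DEVICE);
	return ret;
}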
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4488bd581eca..82660672dcd9 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -22,6 +22,8 @@
22 * - DMA transfer support 22 * - DMA transfer support
23 * - FIFO mode support 23 * - FIFO mode support
24 */ 24 */
25#include <linux/io.h>
26#include <linux/interrupt.h>
25#include <linux/module.h> 27#include <linux/module.h>
26#include <linux/platform_device.h> 28#include <linux/platform_device.h>
27#include <linux/clk.h> 29#include <linux/clk.h>
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 52a7c86af663..ed7d7d62bf68 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -12,6 +12,8 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#include <linux/io.h>
16#include <linux/interrupt.h>
15#include <linux/module.h> 17#include <linux/module.h>
16#include <linux/platform_device.h> 18#include <linux/platform_device.h>
17#include <linux/slab.h> 19#include <linux/slab.h>
@@ -511,7 +513,7 @@ static void sh_sir_tx(struct sh_sir_self *self, int phase)
511 513
512static int sh_sir_read_data(struct sh_sir_self *self) 514static int sh_sir_read_data(struct sh_sir_self *self)
513{ 515{
514 u16 val; 516 u16 val = 0;
515 int timeout = 1024; 517 int timeout = 1024;
516 518
517 while (timeout--) { 519 while (timeout--) {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e86297b32733..e1fcc9589278 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1321,8 +1321,8 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1321 if (ring_is_rsc_enabled(rx_ring)) 1321 if (ring_is_rsc_enabled(rx_ring))
1322 pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); 1322 pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
1323 1323
1324 /* if this is a skb from previous receive DMA will be 0 */ 1324 /* linear means we are building an skb from multiple pages */
1325 if (rx_buffer_info->dma) { 1325 if (!skb_is_nonlinear(skb)) {
1326 u16 hlen; 1326 u16 hlen;
1327 if (pkt_is_rsc && 1327 if (pkt_is_rsc &&
1328 !(staterr & IXGBE_RXD_STAT_EOP) && 1328 !(staterr & IXGBE_RXD_STAT_EOP) &&
@@ -1459,8 +1459,10 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1459 if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { 1459 if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
1460 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, 1460 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,
1461 staterr); 1461 staterr);
1462 if (!ddp_bytes) 1462 if (!ddp_bytes) {
1463 dev_kfree_skb_any(skb);
1463 goto next_desc; 1464 goto next_desc;
1465 }
1464 } 1466 }
1465#endif /* IXGBE_FCOE */ 1467#endif /* IXGBE_FCOE */
1466 ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); 1468 ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index dfc82720065a..ed2a3977c6e7 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -799,5 +799,11 @@ static void __exit cleanup_netconsole(void)
799 } 799 }
800} 800}
801 801
802module_init(init_netconsole); 802/*
803 * Use late_initcall to ensure netconsole is
804 * initialized after network device driver if built-in.
805 *
806 * late_initcall() and module_init() are identical if built as module.
807 */
808late_initcall(init_netconsole);
803module_exit(cleanup_netconsole); 809module_exit(cleanup_netconsole);
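A minimal sketch of the initcall change above, using a hypothetical my_console module rather than netconsole itself: when the driver is built in, late_initcall() places the init routine after the device_initcall() level used by ordinary network drivers, and when it is built as a module the macro is equivalent to module_init().

#include <linux/init.h>
#include <linux/module.h>

static int __init my_console_init(void)
{
	/* Built-in: runs after device_initcall()-level netdev drivers. */
	return 0;
}

static void __exit my_console_exit(void)
{
}

late_initcall(my_console_init);	/* == module_init() when built as a module */
module_exit(my_console_exit);
MODULE_LICENSE("GPL");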
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index 59fac77d0dbb..a09a07197eb5 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -127,8 +127,8 @@ struct pch_gbe_regs {
127 127
128/* Reset */ 128/* Reset */
129#define PCH_GBE_ALL_RST 0x80000000 /* All reset */ 129#define PCH_GBE_ALL_RST 0x80000000 /* All reset */
130#define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */ 130#define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */
131#define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */ 131#define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */
132 132
133/* TCP/IP Accelerator Control */ 133/* TCP/IP Accelerator Control */
134#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */ 134#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */
@@ -276,6 +276,9 @@ struct pch_gbe_regs {
276#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */ 276#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */
277#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */ 277#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */
278 278
279/* RX DMA STATUS */
280#define PCH_GBE_IDLE_CHECK 0xFFFFFFFE
281
279/* Wake On LAN Status */ 282/* Wake On LAN Status */
280#define PCH_GBE_WLS_BR 0x00000008 /* Broadcast Address */ 283#define PCH_GBE_WLS_BR 0x00000008 /* Broadcast Address */
281#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */ 284#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */
@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc {
471struct pch_gbe_buffer { 474struct pch_gbe_buffer {
472 struct sk_buff *skb; 475 struct sk_buff *skb;
473 dma_addr_t dma; 476 dma_addr_t dma;
477 unsigned char *rx_buffer;
474 unsigned long time_stamp; 478 unsigned long time_stamp;
475 u16 length; 479 u16 length;
476 bool mapped; 480 bool mapped;
@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring {
511struct pch_gbe_rx_ring { 515struct pch_gbe_rx_ring {
512 struct pch_gbe_rx_desc *desc; 516 struct pch_gbe_rx_desc *desc;
513 dma_addr_t dma; 517 dma_addr_t dma;
518 unsigned char *rx_buff_pool;
519 dma_addr_t rx_buff_pool_logic;
520 unsigned int rx_buff_pool_size;
514 unsigned int size; 521 unsigned int size;
515 unsigned int count; 522 unsigned int count;
516 unsigned int next_to_use; 523 unsigned int next_to_use;
@@ -622,6 +629,7 @@ struct pch_gbe_adapter {
622 unsigned long rx_buffer_len; 629 unsigned long rx_buffer_len;
623 unsigned long tx_queue_len; 630 unsigned long tx_queue_len;
624 bool have_msi; 631 bool have_msi;
632 bool rx_stop_flag;
625}; 633};
626 634
627extern const char pch_driver_version[]; 635extern const char pch_driver_version[];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index eac3c5ca9731..b8b4ba27b0e7 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -20,7 +20,6 @@
20 20
21#include "pch_gbe.h" 21#include "pch_gbe.h"
22#include "pch_gbe_api.h" 22#include "pch_gbe_api.h"
23#include <linux/prefetch.h>
24 23
25#define DRV_VERSION "1.00" 24#define DRV_VERSION "1.00"
26const char pch_driver_version[] = DRV_VERSION; 25const char pch_driver_version[] = DRV_VERSION;
@@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION;
34#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ 33#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
35#define PCH_GBE_COPYBREAK_DEFAULT 256 34#define PCH_GBE_COPYBREAK_DEFAULT 256
36#define PCH_GBE_PCI_BAR 1 35#define PCH_GBE_PCI_BAR 1
36#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
37 37
38/* Macros for ML7223 */ 38/* Macros for ML7223 */
39#define PCI_VENDOR_ID_ROHM 0x10db 39#define PCI_VENDOR_ID_ROHM 0x10db
40#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 40#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
41 41
42/* Macros for ML7831 */
43#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
44
42#define PCH_GBE_TX_WEIGHT 64 45#define PCH_GBE_TX_WEIGHT 64
43#define PCH_GBE_RX_WEIGHT 64 46#define PCH_GBE_RX_WEIGHT 64
44#define PCH_GBE_RX_BUFFER_WRITE 16 47#define PCH_GBE_RX_BUFFER_WRITE 16
@@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION;
52 ) 55 )
53 56
54/* Ethertype field values */ 57/* Ethertype field values */
58#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
55#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318 59#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
56#define PCH_GBE_FRAME_SIZE_2048 2048 60#define PCH_GBE_FRAME_SIZE_2048 2048
57#define PCH_GBE_FRAME_SIZE_4096 4096 61#define PCH_GBE_FRAME_SIZE_4096 4096
@@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION;
83#define PCH_GBE_INT_ENABLE_MASK ( \ 87#define PCH_GBE_INT_ENABLE_MASK ( \
84 PCH_GBE_INT_RX_DMA_CMPLT | \ 88 PCH_GBE_INT_RX_DMA_CMPLT | \
85 PCH_GBE_INT_RX_DSC_EMP | \ 89 PCH_GBE_INT_RX_DSC_EMP | \
90 PCH_GBE_INT_RX_FIFO_ERR | \
86 PCH_GBE_INT_WOL_DET | \ 91 PCH_GBE_INT_WOL_DET | \
87 PCH_GBE_INT_TX_CMPLT \ 92 PCH_GBE_INT_TX_CMPLT \
88 ) 93 )
89 94
95#define PCH_GBE_INT_DISABLE_ALL 0
90 96
91static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; 97static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
92 98
@@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
138 if (!tmp) 144 if (!tmp)
139 pr_err("Error: busy bit is not cleared\n"); 145 pr_err("Error: busy bit is not cleared\n");
140} 146}
147
148/**
149 * pch_gbe_wait_clr_bit_irq - Wait for a bit to clear (interrupt-context variant)
150 * @reg: Pointer to the register
151 * @bit: Busy bit to wait on
152 */
153static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
154{
155 u32 tmp;
156 int ret = -1;
157 /* wait busy */
158 tmp = 20;
159 while ((ioread32(reg) & bit) && --tmp)
160 udelay(5);
161 if (!tmp)
162 pr_err("Error: busy bit is not cleared\n");
163 else
164 ret = 0;
165 return ret;
166}
167
141/** 168/**
142 * pch_gbe_mac_mar_set - Set MAC address register 169 * pch_gbe_mac_mar_set - Set MAC address register
143 * @hw: Pointer to the HW structure 170 * @hw: Pointer to the HW structure
@@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
189 return; 216 return;
190} 217}
191 218
219static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
220{
221 /* Read the MAC address and store it in the private data */
222 pch_gbe_mac_read_mac_addr(hw);
223 iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
224 pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
225 /* Setup the MAC address */
226 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
227 return;
228}
229
192/** 230/**
193 * pch_gbe_mac_init_rx_addrs - Initialize receive address's 231 * pch_gbe_mac_init_rx_addrs - Initialize receive address's
194 * @hw: Pointer to the HW structure 232 * @hw: Pointer to the HW structure
@@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
671 709
672 tcpip = ioread32(&hw->reg->TCPIP_ACC); 710 tcpip = ioread32(&hw->reg->TCPIP_ACC);
673 711
674 if (netdev->features & NETIF_F_RXCSUM) { 712 tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
675 tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF; 713 tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
676 tcpip |= PCH_GBE_RX_TCPIPACC_EN;
677 } else {
678 tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
679 tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
680 }
681 iowrite32(tcpip, &hw->reg->TCPIP_ACC); 714 iowrite32(tcpip, &hw->reg->TCPIP_ACC);
682 return; 715 return;
683} 716}
@@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
717 iowrite32(rdba, &hw->reg->RX_DSC_BASE); 750 iowrite32(rdba, &hw->reg->RX_DSC_BASE);
718 iowrite32(rdlen, &hw->reg->RX_DSC_SIZE); 751 iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
719 iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); 752 iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
720
721 /* Enables Receive DMA */
722 rxdma = ioread32(&hw->reg->DMA_CTRL);
723 rxdma |= PCH_GBE_RX_DMA_EN;
724 iowrite32(rxdma, &hw->reg->DMA_CTRL);
725 /* Enables Receive */
726 iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
727} 753}
728 754
729/** 755/**
@@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1097 spin_unlock_irqrestore(&adapter->stats_lock, flags); 1123 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1098} 1124}
1099 1125
1126static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
1127{
1128 struct pch_gbe_hw *hw = &adapter->hw;
1129 u32 rxdma;
1130 u16 value;
1131 int ret;
1132
1133 /* Disable Receive DMA */
1134 rxdma = ioread32(&hw->reg->DMA_CTRL);
1135 rxdma &= ~PCH_GBE_RX_DMA_EN;
1136 iowrite32(rxdma, &hw->reg->DMA_CTRL);
1137 /* Wait until the Rx DMA bus is idle */
1138 ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
1139 if (ret) {
1140 /* Disable Bus master */
1141 pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
1142 value &= ~PCI_COMMAND_MASTER;
1143 pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
1144 /* Stop Receive */
1145 pch_gbe_mac_reset_rx(hw);
1146 /* Enable Bus master */
1147 value |= PCI_COMMAND_MASTER;
1148 pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
1149 } else {
1150 /* Stop Receive */
1151 pch_gbe_mac_reset_rx(hw);
1152 }
1153}
1154
1155static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
1156{
1157 u32 rxdma;
1158
1159 /* Enables Receive DMA */
1160 rxdma = ioread32(&hw->reg->DMA_CTRL);
1161 rxdma |= PCH_GBE_RX_DMA_EN;
1162 iowrite32(rxdma, &hw->reg->DMA_CTRL);
1163 /* Enables Receive */
1164 iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
1165 return;
1166}
1167
1100/** 1168/**
1101 * pch_gbe_intr - Interrupt Handler 1169 * pch_gbe_intr - Interrupt Handler
1102 * @irq: Interrupt number 1170 * @irq: Interrupt number
@@ -1123,7 +1191,17 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
1123 if (int_st & PCH_GBE_INT_RX_FRAME_ERR) 1191 if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1124 adapter->stats.intr_rx_frame_err_count++; 1192 adapter->stats.intr_rx_frame_err_count++;
1125 if (int_st & PCH_GBE_INT_RX_FIFO_ERR) 1193 if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1126 adapter->stats.intr_rx_fifo_err_count++; 1194 if (!adapter->rx_stop_flag) {
1195 adapter->stats.intr_rx_fifo_err_count++;
1196 pr_debug("Rx fifo over run\n");
1197 adapter->rx_stop_flag = true;
1198 int_en = ioread32(&hw->reg->INT_EN);
1199 iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
1200 &hw->reg->INT_EN);
1201 pch_gbe_stop_receive(adapter);
1202 int_st |= ioread32(&hw->reg->INT_ST);
1203 int_st = int_st & ioread32(&hw->reg->INT_EN);
1204 }
1127 if (int_st & PCH_GBE_INT_RX_DMA_ERR) 1205 if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1128 adapter->stats.intr_rx_dma_err_count++; 1206 adapter->stats.intr_rx_dma_err_count++;
1129 if (int_st & PCH_GBE_INT_TX_FIFO_ERR) 1207 if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
@@ -1135,21 +1213,18 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
1135 /* When Rx descriptor is empty */ 1213 /* When Rx descriptor is empty */
1136 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { 1214 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1137 adapter->stats.intr_rx_dsc_empty_count++; 1215 adapter->stats.intr_rx_dsc_empty_count++;
1138 pr_err("Rx descriptor is empty\n"); 1216 pr_debug("Rx descriptor is empty\n");
1139 int_en = ioread32(&hw->reg->INT_EN); 1217 int_en = ioread32(&hw->reg->INT_EN);
1140 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); 1218 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1141 if (hw->mac.tx_fc_enable) { 1219 if (hw->mac.tx_fc_enable) {
1142 /* Set Pause packet */ 1220 /* Set Pause packet */
1143 pch_gbe_mac_set_pause_packet(hw); 1221 pch_gbe_mac_set_pause_packet(hw);
1144 } 1222 }
1145 if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
1146 == 0) {
1147 return IRQ_HANDLED;
1148 }
1149 } 1223 }
1150 1224
1151 /* When request status is Receive interruption */ 1225 /* When request status is Receive interruption */
1152 if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) { 1226 if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
1227 (adapter->rx_stop_flag == true)) {
1153 if (likely(napi_schedule_prep(&adapter->napi))) { 1228 if (likely(napi_schedule_prep(&adapter->napi))) {
1154 /* Enable only Rx Descriptor empty */ 1229 /* Enable only Rx Descriptor empty */
1155 atomic_inc(&adapter->irq_sem); 1230 atomic_inc(&adapter->irq_sem);
@@ -1185,29 +1260,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1185 unsigned int i; 1260 unsigned int i;
1186 unsigned int bufsz; 1261 unsigned int bufsz;
1187 1262
1188 bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN; 1263 bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
1189 i = rx_ring->next_to_use; 1264 i = rx_ring->next_to_use;
1190 1265
1191 while ((cleaned_count--)) { 1266 while ((cleaned_count--)) {
1192 buffer_info = &rx_ring->buffer_info[i]; 1267 buffer_info = &rx_ring->buffer_info[i];
1193 skb = buffer_info->skb; 1268 skb = netdev_alloc_skb(netdev, bufsz);
1194 if (skb) { 1269 if (unlikely(!skb)) {
1195 skb_trim(skb, 0); 1270 /* Better luck next round */
1196 } else { 1271 adapter->stats.rx_alloc_buff_failed++;
1197 skb = netdev_alloc_skb(netdev, bufsz); 1272 break;
1198 if (unlikely(!skb)) {
1199 /* Better luck next round */
1200 adapter->stats.rx_alloc_buff_failed++;
1201 break;
1202 }
1203 /* 64byte align */
1204 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1205
1206 buffer_info->skb = skb;
1207 buffer_info->length = adapter->rx_buffer_len;
1208 } 1273 }
1274 /* align */
1275 skb_reserve(skb, NET_IP_ALIGN);
1276 buffer_info->skb = skb;
1277
1209 buffer_info->dma = dma_map_single(&pdev->dev, 1278 buffer_info->dma = dma_map_single(&pdev->dev,
1210 skb->data, 1279 buffer_info->rx_buffer,
1211 buffer_info->length, 1280 buffer_info->length,
1212 DMA_FROM_DEVICE); 1281 DMA_FROM_DEVICE);
1213 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { 1282 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
@@ -1240,6 +1309,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1240 return; 1309 return;
1241} 1310}
1242 1311
1312static int
1313pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1314 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1315{
1316 struct pci_dev *pdev = adapter->pdev;
1317 struct pch_gbe_buffer *buffer_info;
1318 unsigned int i;
1319 unsigned int bufsz;
1320 unsigned int size;
1321
1322 bufsz = adapter->rx_buffer_len;
1323
1324 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1325 rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
1326 &rx_ring->rx_buff_pool_logic,
1327 GFP_KERNEL);
1328 if (!rx_ring->rx_buff_pool) {
1329 pr_err("Unable to allocate memory for the receive poll buffer\n");
1330 return -ENOMEM;
1331 }
1332 memset(rx_ring->rx_buff_pool, 0, size);
1333 rx_ring->rx_buff_pool_size = size;
1334 for (i = 0; i < rx_ring->count; i++) {
1335 buffer_info = &rx_ring->buffer_info[i];
1336 buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
1337 buffer_info->length = bufsz;
1338 }
1339 return 0;
1340}
1341
1243/** 1342/**
1244 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers 1343 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1245 * @adapter: Board private structure 1344 * @adapter: Board private structure
@@ -1285,7 +1384,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1285 struct sk_buff *skb; 1384 struct sk_buff *skb;
1286 unsigned int i; 1385 unsigned int i;
1287 unsigned int cleaned_count = 0; 1386 unsigned int cleaned_count = 0;
1288 bool cleaned = false; 1387 bool cleaned = true;
1289 1388
1290 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); 1389 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1291 1390
@@ -1296,7 +1395,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1296 1395
1297 while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { 1396 while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1298 pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); 1397 pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
1299 cleaned = true;
1300 buffer_info = &tx_ring->buffer_info[i]; 1398 buffer_info = &tx_ring->buffer_info[i];
1301 skb = buffer_info->skb; 1399 skb = buffer_info->skb;
1302 1400
@@ -1339,8 +1437,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1339 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); 1437 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1340 1438
1341 /* weight of a sort for tx, to avoid endless transmit cleanup */ 1439 /* weight of a sort for tx, to avoid endless transmit cleanup */
1342 if (cleaned_count++ == PCH_GBE_TX_WEIGHT) 1440 if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
1441 cleaned = false;
1343 break; 1442 break;
1443 }
1344 } 1444 }
1345 pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", 1445 pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1346 cleaned_count); 1446 cleaned_count);
@@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1380 unsigned int i; 1480 unsigned int i;
1381 unsigned int cleaned_count = 0; 1481 unsigned int cleaned_count = 0;
1382 bool cleaned = false; 1482 bool cleaned = false;
1383 struct sk_buff *skb, *new_skb; 1483 struct sk_buff *skb;
1384 u8 dma_status; 1484 u8 dma_status;
1385 u16 gbec_status; 1485 u16 gbec_status;
1386 u32 tcp_ip_status; 1486 u32 tcp_ip_status;
@@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1401 rx_desc->gbec_status = DSC_INIT16; 1501 rx_desc->gbec_status = DSC_INIT16;
1402 buffer_info = &rx_ring->buffer_info[i]; 1502 buffer_info = &rx_ring->buffer_info[i];
1403 skb = buffer_info->skb; 1503 skb = buffer_info->skb;
1504 buffer_info->skb = NULL;
1404 1505
1405 /* unmap dma */ 1506 /* unmap dma */
1406 dma_unmap_single(&pdev->dev, buffer_info->dma, 1507 dma_unmap_single(&pdev->dev, buffer_info->dma,
1407 buffer_info->length, DMA_FROM_DEVICE); 1508 buffer_info->length, DMA_FROM_DEVICE);
1408 buffer_info->mapped = false; 1509 buffer_info->mapped = false;
1409 /* Prefetch the packet */
1410 prefetch(skb->data);
1411 1510
1412 pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " 1511 pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
1413 "TCP:0x%08x] BufInf = 0x%p\n", 1512 "TCP:0x%08x] BufInf = 0x%p\n",
@@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1427 pr_err("Receive CRC Error\n"); 1526 pr_err("Receive CRC Error\n");
1428 } else { 1527 } else {
1429 /* get receive length */ 1528 /* get receive length */
1430 /* length convert[-3] */ 1529 /* length convert[-3], length includes FCS length */
1431 length = (rx_desc->rx_words_eob) - 3; 1530 length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
1432 1531 if (rx_desc->rx_words_eob & 0x02)
1433 /* Decide the data conversion method */ 1532 length = length - 4;
1434 if (!(netdev->features & NETIF_F_RXCSUM)) { 1533 /*
1435 /* [Header:14][payload] */ 1534 * buffer_info->rx_buffer: [Header:14][payload]
1436 if (NET_IP_ALIGN) { 1535 * skb->data: [Reserve:2][Header:14][payload]
1437 /* Because alignment differs, 1536 */
1438 * the new_skb is newly allocated, 1537 memcpy(skb->data, buffer_info->rx_buffer, length);
1439 * and data is copied to new_skb.*/ 1538
1440 new_skb = netdev_alloc_skb(netdev,
1441 length + NET_IP_ALIGN);
1442 if (!new_skb) {
1443 /* dorrop error */
1444 pr_err("New skb allocation "
1445 "Error\n");
1446 goto dorrop;
1447 }
1448 skb_reserve(new_skb, NET_IP_ALIGN);
1449 memcpy(new_skb->data, skb->data,
1450 length);
1451 skb = new_skb;
1452 } else {
1453 /* DMA buffer is used as SKB as it is.*/
1454 buffer_info->skb = NULL;
1455 }
1456 } else {
1457 /* [Header:14][padding:2][payload] */
1458 /* The length includes padding length */
1459 length = length - PCH_GBE_DMA_PADDING;
1460 if ((length < copybreak) ||
1461 (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
1462 /* Because alignment differs,
1463 * the new_skb is newly allocated,
1464 * and data is copied to new_skb.
1465 * Padding data is deleted
1466 * at the time of a copy.*/
1467 new_skb = netdev_alloc_skb(netdev,
1468 length + NET_IP_ALIGN);
1469 if (!new_skb) {
1470 /* dorrop error */
1471 pr_err("New skb allocation "
1472 "Error\n");
1473 goto dorrop;
1474 }
1475 skb_reserve(new_skb, NET_IP_ALIGN);
1476 memcpy(new_skb->data, skb->data,
1477 ETH_HLEN);
1478 memcpy(&new_skb->data[ETH_HLEN],
1479 &skb->data[ETH_HLEN +
1480 PCH_GBE_DMA_PADDING],
1481 length - ETH_HLEN);
1482 skb = new_skb;
1483 } else {
1484 /* Padding data is deleted
1485 * by moving header data.*/
1486 memmove(&skb->data[PCH_GBE_DMA_PADDING],
1487 &skb->data[0], ETH_HLEN);
1488 skb_reserve(skb, NET_IP_ALIGN);
1489 buffer_info->skb = NULL;
1490 }
1491 }
1492 /* The length includes FCS length */
1493 length = length - ETH_FCS_LEN;
1494 /* update status of driver */ 1539 /* update status of driver */
1495 adapter->stats.rx_bytes += length; 1540 adapter->stats.rx_bytes += length;
1496 adapter->stats.rx_packets++; 1541 adapter->stats.rx_packets++;
@@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1509 pr_debug("Receive skb->ip_summed: %d length: %d\n", 1554 pr_debug("Receive skb->ip_summed: %d length: %d\n",
1510 skb->ip_summed, length); 1555 skb->ip_summed, length);
1511 } 1556 }
1512dorrop:
1513 /* return some buffers to hardware, one at a time is too slow */ 1557 /* return some buffers to hardware, one at a time is too slow */
1514 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { 1558 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1515 pch_gbe_alloc_rx_buffers(adapter, rx_ring, 1559 pch_gbe_alloc_rx_buffers(adapter, rx_ring,
@@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
1714 pr_err("Error: can't bring device up\n"); 1758 pr_err("Error: can't bring device up\n");
1715 return err; 1759 return err;
1716 } 1760 }
1761 err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
1762 if (err) {
1763 pr_err("Error: can't bring device up\n");
1764 return err;
1765 }
1717 pch_gbe_alloc_tx_buffers(adapter, tx_ring); 1766 pch_gbe_alloc_tx_buffers(adapter, tx_ring);
1718 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); 1767 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
1719 adapter->tx_queue_len = netdev->tx_queue_len; 1768 adapter->tx_queue_len = netdev->tx_queue_len;
1769 pch_gbe_start_receive(&adapter->hw);
1720 1770
1721 mod_timer(&adapter->watchdog_timer, jiffies); 1771 mod_timer(&adapter->watchdog_timer, jiffies);
1722 1772
@@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
1734void pch_gbe_down(struct pch_gbe_adapter *adapter) 1784void pch_gbe_down(struct pch_gbe_adapter *adapter)
1735{ 1785{
1736 struct net_device *netdev = adapter->netdev; 1786 struct net_device *netdev = adapter->netdev;
1787 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1737 1788
1738 /* signal that we're down so the interrupt handler does not 1789 /* signal that we're down so the interrupt handler does not
1739 * reschedule our watchdog timer */ 1790 * reschedule our watchdog timer */
@@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
1752 pch_gbe_reset(adapter); 1803 pch_gbe_reset(adapter);
1753 pch_gbe_clean_tx_ring(adapter, adapter->tx_ring); 1804 pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
1754 pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); 1805 pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
1806
1807 pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
1808 rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
1809 rx_ring->rx_buff_pool_logic = 0;
1810 rx_ring->rx_buff_pool_size = 0;
1811 rx_ring->rx_buff_pool = NULL;
1755} 1812}
1756 1813
1757/** 1814/**
@@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2004{ 2061{
2005 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 2062 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2006 int max_frame; 2063 int max_frame;
2064 unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
2065 int err;
2007 2066
2008 max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2067 max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2009 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 2068 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
@@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2018 else if (max_frame <= PCH_GBE_FRAME_SIZE_8192) 2077 else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2019 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192; 2078 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2020 else 2079 else
2021 adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE; 2080 adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
2022 netdev->mtu = new_mtu;
2023 adapter->hw.mac.max_frame_size = max_frame;
2024 2081
2025 if (netif_running(netdev)) 2082 if (netif_running(netdev)) {
2026 pch_gbe_reinit_locked(adapter); 2083 pch_gbe_down(adapter);
2027 else 2084 err = pch_gbe_up(adapter);
2085 if (err) {
2086 adapter->rx_buffer_len = old_rx_buffer_len;
2087 pch_gbe_up(adapter);
2088 return -ENOMEM;
2089 } else {
2090 netdev->mtu = new_mtu;
2091 adapter->hw.mac.max_frame_size = max_frame;
2092 }
2093 } else {
2028 pch_gbe_reset(adapter); 2094 pch_gbe_reset(adapter);
2095 netdev->mtu = new_mtu;
2096 adapter->hw.mac.max_frame_size = max_frame;
2097 }
2029 2098
2030 pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", 2099 pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
2031 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, 2100 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
@@ -2099,33 +2168,39 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2099{ 2168{
2100 struct pch_gbe_adapter *adapter = 2169 struct pch_gbe_adapter *adapter =
2101 container_of(napi, struct pch_gbe_adapter, napi); 2170 container_of(napi, struct pch_gbe_adapter, napi);
2102 struct net_device *netdev = adapter->netdev;
2103 int work_done = 0; 2171 int work_done = 0;
2104 bool poll_end_flag = false; 2172 bool poll_end_flag = false;
2105 bool cleaned = false; 2173 bool cleaned = false;
2174 u32 int_en;
2106 2175
2107 pr_debug("budget : %d\n", budget); 2176 pr_debug("budget : %d\n", budget);
2108 2177
2109 /* Keep link state information with original netdev */ 2178 pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
2110 if (!netif_carrier_ok(netdev)) { 2179 cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2111 poll_end_flag = true;
2112 } else {
2113 cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2114 pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
2115 2180
2116 if (cleaned) 2181 if (!cleaned)
2117 work_done = budget; 2182 work_done = budget;
2118 /* If no Tx and not enough Rx work done, 2183 /* If no Tx and not enough Rx work done,
2119 * exit the polling mode 2184 * exit the polling mode
2120 */ 2185 */
2121 if ((work_done < budget) || !netif_running(netdev)) 2186 if (work_done < budget)
2122 poll_end_flag = true; 2187 poll_end_flag = true;
2123 }
2124 2188
2125 if (poll_end_flag) { 2189 if (poll_end_flag) {
2126 napi_complete(napi); 2190 napi_complete(napi);
2191 if (adapter->rx_stop_flag) {
2192 adapter->rx_stop_flag = false;
2193 pch_gbe_start_receive(&adapter->hw);
2194 }
2127 pch_gbe_irq_enable(adapter); 2195 pch_gbe_irq_enable(adapter);
2128 } 2196 } else
2197 if (adapter->rx_stop_flag) {
2198 adapter->rx_stop_flag = false;
2199 pch_gbe_start_receive(&adapter->hw);
2200 int_en = ioread32(&adapter->hw.reg->INT_EN);
2201 iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
2202 &adapter->hw.reg->INT_EN);
2203 }
2129 2204
2130 pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", 2205 pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
2131 poll_end_flag, work_done, budget); 2206 poll_end_flag, work_done, budget);
@@ -2452,6 +2527,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
2452 .class = (PCI_CLASS_NETWORK_ETHERNET << 8), 2527 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2453 .class_mask = (0xFFFF00) 2528 .class_mask = (0xFFFF00)
2454 }, 2529 },
2530 {.vendor = PCI_VENDOR_ID_ROHM,
2531 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
2532 .subvendor = PCI_ANY_ID,
2533 .subdevice = PCI_ANY_ID,
2534 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2535 .class_mask = (0xFFFF00)
2536 },
2455 /* required last entry */ 2537 /* required last entry */
2456 {0} 2538 {0}
2457}; 2539};
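
For reference, a minimal user-space sketch of the receive-length arithmetic in the pch_gbe_clean_rx hunk above. The 3-byte conversion offset, the ETH_FCS_LEN subtraction and the extra 4-byte drop when bit 1 of rx_words_eob is set are taken as read from the hunk; the sample descriptor values are purely illustrative.

/* Sketch only: mirrors the patched length computation, not the hardware spec. */
#include <stdio.h>

#define ETH_FCS_LEN 4

static unsigned int pch_gbe_rx_payload_len(unsigned int rx_words_eob)
{
	unsigned int length = rx_words_eob - 3 - ETH_FCS_LEN;

	if (rx_words_eob & 0x02)	/* same test as the hunk */
		length -= 4;

	return length;
}

int main(void)
{
	/* hypothetical descriptor values, for illustration only */
	printf("%u\n", pch_gbe_rx_payload_len(1521));	/* 1521 - 3 - 4 = 1514 */
	printf("%u\n", pch_gbe_rx_payload_len(1522));	/* bit 1 set: 1515 - 4 = 1511 */
	return 0;
}
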
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 8b3090dc4bcd..80b6f36a8074 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -82,7 +82,7 @@ static int cards_found;
82/* 82/*
83 * VLB I/O addresses 83 * VLB I/O addresses
84 */ 84 */
85static unsigned int pcnet32_portlist[] __initdata = 85static unsigned int pcnet32_portlist[] =
86 { 0x300, 0x320, 0x340, 0x360, 0 }; 86 { 0x300, 0x320, 0x340, 0x360, 0 };
87 87
88static int pcnet32_debug; 88static int pcnet32_debug;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2cd8dc5847b4..edd7304773eb 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -34,8 +34,7 @@
34#define PAGESEL 0x13 34#define PAGESEL 0x13
35#define LAYER4 0x02 35#define LAYER4 0x02
36#define LAYER2 0x01 36#define LAYER2 0x01
37#define MAX_RXTS 4 37#define MAX_RXTS 64
38#define MAX_TXTS 4
39#define N_EXT_TS 1 38#define N_EXT_TS 1
40#define PSF_PTPVER 2 39#define PSF_PTPVER 2
41#define PSF_EVNT 0x4000 40#define PSF_EVNT 0x4000
@@ -218,7 +217,7 @@ static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
218 rxts->seqid = p->seqid; 217 rxts->seqid = p->seqid;
219 rxts->msgtype = (p->msgtype >> 12) & 0xf; 218 rxts->msgtype = (p->msgtype >> 12) & 0xf;
220 rxts->hash = p->msgtype & 0x0fff; 219 rxts->hash = p->msgtype & 0x0fff;
221 rxts->tmo = jiffies + HZ; 220 rxts->tmo = jiffies + 2;
222} 221}
223 222
224static u64 phy2txts(struct phy_txts *p) 223static u64 phy2txts(struct phy_txts *p)
@@ -590,7 +589,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
590 prune_rx_ts(dp83640); 589 prune_rx_ts(dp83640);
591 590
592 if (list_empty(&dp83640->rxpool)) { 591 if (list_empty(&dp83640->rxpool)) {
593 pr_warning("dp83640: rx timestamp pool is empty\n"); 592 pr_debug("dp83640: rx timestamp pool is empty\n");
594 goto out; 593 goto out;
595 } 594 }
596 rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); 595 rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@ -613,7 +612,7 @@ static void decode_txts(struct dp83640_private *dp83640,
613 skb = skb_dequeue(&dp83640->tx_queue); 612 skb = skb_dequeue(&dp83640->tx_queue);
614 613
615 if (!skb) { 614 if (!skb) {
616 pr_warning("dp83640: have timestamp but tx_queue empty\n"); 615 pr_debug("dp83640: have timestamp but tx_queue empty\n");
617 return; 616 return;
618 } 617 }
619 ns = phy2txts(phy_txts); 618 ns = phy2txts(phy_txts);
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 0620ba963508..04bb8fcc0cb5 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -25,8 +25,9 @@
25/* DP83865 phy identifier values */ 25/* DP83865 phy identifier values */
26#define DP83865_PHY_ID 0x20005c7a 26#define DP83865_PHY_ID 0x20005c7a
27 27
28#define DP83865_INT_MASK_REG 0x15 28#define DP83865_INT_STATUS 0x14
29#define DP83865_INT_MASK_STATUS 0x14 29#define DP83865_INT_MASK 0x15
30#define DP83865_INT_CLEAR 0x17
30 31
31#define DP83865_INT_REMOTE_FAULT 0x0008 32#define DP83865_INT_REMOTE_FAULT 0x0008
32#define DP83865_INT_ANE_COMPLETED 0x0010 33#define DP83865_INT_ANE_COMPLETED 0x0010
@@ -68,21 +69,25 @@ static int ns_config_intr(struct phy_device *phydev)
68 int err; 69 int err;
69 70
70 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) 71 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
71 err = phy_write(phydev, DP83865_INT_MASK_REG, 72 err = phy_write(phydev, DP83865_INT_MASK,
72 DP83865_INT_MASK_DEFAULT); 73 DP83865_INT_MASK_DEFAULT);
73 else 74 else
74 err = phy_write(phydev, DP83865_INT_MASK_REG, 0); 75 err = phy_write(phydev, DP83865_INT_MASK, 0);
75 76
76 return err; 77 return err;
77} 78}
78 79
79static int ns_ack_interrupt(struct phy_device *phydev) 80static int ns_ack_interrupt(struct phy_device *phydev)
80{ 81{
81 int ret = phy_read(phydev, DP83865_INT_MASK_STATUS); 82 int ret = phy_read(phydev, DP83865_INT_STATUS);
82 if (ret < 0) 83 if (ret < 0)
83 return ret; 84 return ret;
84 85
 85 return 0; 86 /* Clear the interrupt status bit by writing a "1"
87 * to the corresponding bit in INT_CLEAR (2:0 are reserved) */
88 ret = phy_write(phydev, DP83865_INT_CLEAR, ret & ~0x7);
89
90 return ret;
86} 91}
87 92
88static void ns_giga_speed_fallback(struct phy_device *phydev, int mode) 93static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
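
A small stand-alone model of the write-1-to-clear acknowledge sequence the ns_ack_interrupt() change introduces: read the latched status, then write those bits back to the clear register with the reserved bits 2:0 masked off. The fake register and its contents below are illustrative only, not taken from the DP83865 datasheet.

/* Sketch only: models write-1-to-clear acknowledge in ordinary memory. */
#include <stdint.h>
#include <stdio.h>

struct fake_phy {
	uint16_t int_status;	/* latched interrupt events */
};

static int phy_read_status(const struct fake_phy *phy)
{
	return phy->int_status;
}

static void phy_write_clear(struct fake_phy *phy, uint16_t val)
{
	/* writing 1 to a bit clears the corresponding latched event */
	phy->int_status &= ~val;
}

int main(void)
{
	struct fake_phy phy = { .int_status = 0x0018 };	/* two events pending */
	int status = phy_read_status(&phy);

	phy_write_clear(&phy, status & ~0x7);	/* bits 2:0 reserved, as in the patch */
	printf("status after ack: 0x%04x\n", (unsigned)phy.int_status);	/* 0x0000 */
	return 0;
}
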
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 10e5d985afa3..edfa15d2e795 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1465,7 +1465,12 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1465 continue; 1465 continue;
1466 } 1466 }
1467 1467
1468 mtu = pch->chan->mtu - hdrlen; 1468 /*
1469 * hdrlen includes the 2-byte PPP protocol field, but the
1470 * MTU counts only the payload excluding the protocol field.
1471 * (RFC1661 Section 2)
1472 */
1473 mtu = pch->chan->mtu - (hdrlen - 2);
1469 if (mtu < 4) 1474 if (mtu < 4)
1470 mtu = 4; 1475 mtu = 4;
1471 if (flen > mtu) 1476 if (flen > mtu)
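
A worked example of the corrected fragment-size arithmetic above, assuming a 1500-byte channel MTU and a 6-byte long-sequence multilink header (2-byte protocol field plus 4-byte MP header): only (hdrlen - 2) bytes are charged against the MTU, because the channel MTU already excludes the protocol field.

/* Sketch only: the values are illustrative, the expression matches the hunk. */
#include <stdio.h>

static int ppp_frag_payload(int chan_mtu, int hdrlen)
{
	int mtu = chan_mtu - (hdrlen - 2);	/* patched expression */

	if (mtu < 4)				/* same lower bound as the driver */
		mtu = 4;
	return mtu;
}

int main(void)
{
	printf("%d\n", ppp_frag_payload(1500, 6));	/* 1500 - 4 = 1496 per fragment */
	return 0;
}
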
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 1a3033d8e7ed..d17d0624c5e6 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -40,6 +40,7 @@
40#include <linux/clk.h> 40#include <linux/clk.h>
41#include <linux/phy.h> 41#include <linux/phy.h>
42#include <linux/io.h> 42#include <linux/io.h>
43#include <linux/interrupt.h>
43#include <linux/types.h> 44#include <linux/types.h>
44#include <asm/pgtable.h> 45#include <asm/pgtable.h>
45#include <asm/system.h> 46#include <asm/system.h>
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 02339b3352e7..c23667017922 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -407,6 +407,7 @@ enum rtl_register_content {
407 RxOK = 0x0001, 407 RxOK = 0x0001,
408 408
409 /* RxStatusDesc */ 409 /* RxStatusDesc */
410 RxBOVF = (1 << 24),
410 RxFOVF = (1 << 23), 411 RxFOVF = (1 << 23),
411 RxRWT = (1 << 22), 412 RxRWT = (1 << 22),
412 RxRES = (1 << 21), 413 RxRES = (1 << 21),
@@ -682,6 +683,7 @@ struct rtl8169_private {
682 struct mii_if_info mii; 683 struct mii_if_info mii;
683 struct rtl8169_counters counters; 684 struct rtl8169_counters counters;
684 u32 saved_wolopts; 685 u32 saved_wolopts;
686 u32 opts1_mask;
685 687
686 struct rtl_fw { 688 struct rtl_fw {
687 const struct firmware *fw; 689 const struct firmware *fw;
@@ -710,6 +712,7 @@ MODULE_FIRMWARE(FIRMWARE_8168D_1);
710MODULE_FIRMWARE(FIRMWARE_8168D_2); 712MODULE_FIRMWARE(FIRMWARE_8168D_2);
711MODULE_FIRMWARE(FIRMWARE_8168E_1); 713MODULE_FIRMWARE(FIRMWARE_8168E_1);
712MODULE_FIRMWARE(FIRMWARE_8168E_2); 714MODULE_FIRMWARE(FIRMWARE_8168E_2);
715MODULE_FIRMWARE(FIRMWARE_8168E_3);
713MODULE_FIRMWARE(FIRMWARE_8105E_1); 716MODULE_FIRMWARE(FIRMWARE_8105E_1);
714 717
715static int rtl8169_open(struct net_device *dev); 718static int rtl8169_open(struct net_device *dev);
@@ -3077,6 +3080,14 @@ static void rtl8169_phy_reset(struct net_device *dev,
3077 netif_err(tp, link, dev, "PHY reset failed\n"); 3080 netif_err(tp, link, dev, "PHY reset failed\n");
3078} 3081}
3079 3082
3083static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3084{
3085 void __iomem *ioaddr = tp->mmio_addr;
3086
3087 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3088 (RTL_R8(PHYstatus) & TBI_Enable);
3089}
3090
3080static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) 3091static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3081{ 3092{
3082 void __iomem *ioaddr = tp->mmio_addr; 3093 void __iomem *ioaddr = tp->mmio_addr;
@@ -3109,7 +3120,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3109 ADVERTISED_1000baseT_Half | 3120 ADVERTISED_1000baseT_Half |
3110 ADVERTISED_1000baseT_Full : 0)); 3121 ADVERTISED_1000baseT_Full : 0));
3111 3122
3112 if (RTL_R8(PHYstatus) & TBI_Enable) 3123 if (rtl_tbi_enabled(tp))
3113 netif_info(tp, link, dev, "TBI auto-negotiating\n"); 3124 netif_info(tp, link, dev, "TBI auto-negotiating\n");
3114} 3125}
3115 3126
@@ -3319,9 +3330,16 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
3319 3330
3320static void r810x_pll_power_down(struct rtl8169_private *tp) 3331static void r810x_pll_power_down(struct rtl8169_private *tp)
3321{ 3332{
3333 void __iomem *ioaddr = tp->mmio_addr;
3334
3322 if (__rtl8169_get_wol(tp) & WAKE_ANY) { 3335 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
3323 rtl_writephy(tp, 0x1f, 0x0000); 3336 rtl_writephy(tp, 0x1f, 0x0000);
3324 rtl_writephy(tp, MII_BMCR, 0x0000); 3337 rtl_writephy(tp, MII_BMCR, 0x0000);
3338
3339 if (tp->mac_version == RTL_GIGA_MAC_VER_29 ||
3340 tp->mac_version == RTL_GIGA_MAC_VER_30)
3341 RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
3342 AcceptMulticast | AcceptMyPhys);
3325 return; 3343 return;
3326 } 3344 }
3327 3345
@@ -3417,7 +3435,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
3417 rtl_writephy(tp, MII_BMCR, 0x0000); 3435 rtl_writephy(tp, MII_BMCR, 0x0000);
3418 3436
3419 if (tp->mac_version == RTL_GIGA_MAC_VER_32 || 3437 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3420 tp->mac_version == RTL_GIGA_MAC_VER_33) 3438 tp->mac_version == RTL_GIGA_MAC_VER_33 ||
3439 tp->mac_version == RTL_GIGA_MAC_VER_34)
3421 RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | 3440 RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
3422 AcceptMulticast | AcceptMyPhys); 3441 AcceptMulticast | AcceptMyPhys);
3423 return; 3442 return;
@@ -3727,8 +3746,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3727 tp->features |= rtl_try_msi(pdev, ioaddr, cfg); 3746 tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
3728 RTL_W8(Cfg9346, Cfg9346_Lock); 3747 RTL_W8(Cfg9346, Cfg9346_Lock);
3729 3748
3730 if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) && 3749 if (rtl_tbi_enabled(tp)) {
3731 (RTL_R8(PHYstatus) & TBI_Enable)) {
3732 tp->set_speed = rtl8169_set_speed_tbi; 3750 tp->set_speed = rtl8169_set_speed_tbi;
3733 tp->get_settings = rtl8169_gset_tbi; 3751 tp->get_settings = rtl8169_gset_tbi;
3734 tp->phy_reset_enable = rtl8169_tbi_reset_enable; 3752 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
@@ -3777,6 +3795,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3777 tp->intr_event = cfg->intr_event; 3795 tp->intr_event = cfg->intr_event;
3778 tp->napi_event = cfg->napi_event; 3796 tp->napi_event = cfg->napi_event;
3779 3797
3798 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
3799 ~(RxBOVF | RxFOVF) : ~0;
3800
3780 init_timer(&tp->timer); 3801 init_timer(&tp->timer);
3781 tp->timer.data = (unsigned long) dev; 3802 tp->timer.data = (unsigned long) dev;
3782 tp->timer.function = rtl8169_phy_timer; 3803 tp->timer.function = rtl8169_phy_timer;
@@ -3988,6 +4009,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
3988 while (RTL_R8(TxPoll) & NPQ) 4009 while (RTL_R8(TxPoll) & NPQ)
3989 udelay(20); 4010 udelay(20);
3990 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) { 4011 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
4012 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
3991 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) 4013 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
3992 udelay(100); 4014 udelay(100);
3993 } else { 4015 } else {
@@ -5314,7 +5336,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
5314 u32 status; 5336 u32 status;
5315 5337
5316 rmb(); 5338 rmb();
5317 status = le32_to_cpu(desc->opts1); 5339 status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
5318 5340
5319 if (status & DescOwn) 5341 if (status & DescOwn)
5320 break; 5342 break;
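
A compact sketch of the opts1 masking added above: chips other than RTL_GIGA_MAC_VER_01 ignore the RxBOVF/RxFOVF bits when the RX descriptor status word is read. The bit positions mirror the hunk; the mac-version test is reduced to a plain flag for illustration.

/* Sketch only: demonstrates the mask, not the full descriptor handling. */
#include <stdint.h>
#include <stdio.h>

#define RxBOVF	(1u << 24)
#define RxFOVF	(1u << 23)

static uint32_t rx_status(uint32_t opts1, int is_ver_01)
{
	uint32_t mask = is_ver_01 ? ~0u : ~(RxBOVF | RxFOVF);

	return opts1 & mask;
}

int main(void)
{
	uint32_t opts1 = RxFOVF | 0x5EA;	/* overflow bit + frame size */

	printf("0x%08x\n", (unsigned)rx_status(opts1, 0));	/* 0x000005ea: overflow ignored */
	printf("0x%08x\n", (unsigned)rx_status(opts1, 1));	/* 0x008005ea: overflow kept    */
	return 0;
}
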
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 86ac38c96bcf..3bb131137033 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -80,13 +80,13 @@ static int rionet_capable = 1;
80 */ 80 */
81static struct rio_dev **rionet_active; 81static struct rio_dev **rionet_active;
82 82
83#define is_rionet_capable(pef, src_ops, dst_ops) \ 83#define is_rionet_capable(src_ops, dst_ops) \
84 ((pef & RIO_PEF_INB_MBOX) && \ 84 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
85 (pef & RIO_PEF_INB_DOORBELL) && \ 85 (dst_ops & RIO_DST_OPS_DATA_MSG) && \
86 (src_ops & RIO_SRC_OPS_DOORBELL) && \ 86 (src_ops & RIO_SRC_OPS_DOORBELL) && \
87 (dst_ops & RIO_DST_OPS_DOORBELL)) 87 (dst_ops & RIO_DST_OPS_DOORBELL))
88#define dev_rionet_capable(dev) \ 88#define dev_rionet_capable(dev) \
89 is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops) 89 is_rionet_capable(dev->src_ops, dev->dst_ops)
90 90
91#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) 91#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001)
92#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) 92#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4))
@@ -282,7 +282,6 @@ static int rionet_open(struct net_device *ndev)
282{ 282{
283 int i, rc = 0; 283 int i, rc = 0;
284 struct rionet_peer *peer, *tmp; 284 struct rionet_peer *peer, *tmp;
285 u32 pwdcsr;
286 struct rionet_private *rnet = netdev_priv(ndev); 285 struct rionet_private *rnet = netdev_priv(ndev);
287 286
288 if (netif_msg_ifup(rnet)) 287 if (netif_msg_ifup(rnet))
@@ -332,13 +331,8 @@ static int rionet_open(struct net_device *ndev)
332 continue; 331 continue;
333 } 332 }
334 333
335 /* 334 /* Send a join message */
336 * If device has initialized inbound doorbells, 335 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
337 * send a join message
338 */
339 rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
340 if (pwdcsr & RIO_DOORBELL_AVAIL)
341 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
342 } 336 }
343 337
344 out: 338 out:
@@ -492,7 +486,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
492static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) 486static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
493{ 487{
494 int rc = -ENODEV; 488 int rc = -ENODEV;
495 u32 lpef, lsrc_ops, ldst_ops; 489 u32 lsrc_ops, ldst_ops;
496 struct rionet_peer *peer; 490 struct rionet_peer *peer;
497 struct net_device *ndev = NULL; 491 struct net_device *ndev = NULL;
498 492
@@ -515,12 +509,11 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
515 * on later probes 509 * on later probes
516 */ 510 */
517 if (!rionet_check) { 511 if (!rionet_check) {
518 rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
519 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, 512 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
520 &lsrc_ops); 513 &lsrc_ops);
521 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, 514 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
522 &ldst_ops); 515 &ldst_ops);
523 if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) { 516 if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
524 printk(KERN_ERR 517 printk(KERN_ERR
525 "%s: local device is not network capable\n", 518 "%s: local device is not network capable\n",
526 DRV_NAME); 519 DRV_NAME);
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index faca764aa21b..b59abc706d93 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx)
1050{ 1050{
1051 struct pci_dev *pci_dev = efx->pci_dev; 1051 struct pci_dev *pci_dev = efx->pci_dev;
1052 dma_addr_t dma_mask = efx->type->max_dma_mask; 1052 dma_addr_t dma_mask = efx->type->max_dma_mask;
1053 bool use_wc;
1054 int rc; 1053 int rc;
1055 1054
1056 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); 1055 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx)
1101 rc = -EIO; 1100 rc = -EIO;
1102 goto fail3; 1101 goto fail3;
1103 } 1102 }
1104 1103 efx->membase = ioremap_nocache(efx->membase_phys,
1105 /* bug22643: If SR-IOV is enabled then tx push over a write combined 1104 efx->type->mem_map_size);
1106 * mapping is unsafe. We need to disable write combining in this case.
1107 * MSI is unsupported when SR-IOV is enabled, and the firmware will
1108 * have removed the MSI capability. So write combining is safe if
1109 * there is an MSI capability.
1110 */
1111 use_wc = (!EFX_WORKAROUND_22643(efx) ||
1112 pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
1113 if (use_wc)
1114 efx->membase = ioremap_wc(efx->membase_phys,
1115 efx->type->mem_map_size);
1116 else
1117 efx->membase = ioremap_nocache(efx->membase_phys,
1118 efx->type->mem_map_size);
1119 if (!efx->membase) { 1105 if (!efx->membase) {
1120 netif_err(efx, probe, efx->net_dev, 1106 netif_err(efx, probe, efx->net_dev,
1121 "could not map memory BAR at %llx+%x\n", 1107 "could not map memory BAR at %llx+%x\n",
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index cc978803d484..751d1ec112cc 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
103 _efx_writed(efx, value->u32[2], reg + 8); 103 _efx_writed(efx, value->u32[2], reg + 8);
104 _efx_writed(efx, value->u32[3], reg + 12); 104 _efx_writed(efx, value->u32[3], reg + 12);
105#endif 105#endif
106 wmb();
107 mmiowb(); 106 mmiowb();
108 spin_unlock_irqrestore(&efx->biu_lock, flags); 107 spin_unlock_irqrestore(&efx->biu_lock, flags);
109} 108}
@@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
126 __raw_writel((__force u32)value->u32[0], membase + addr); 125 __raw_writel((__force u32)value->u32[0], membase + addr);
127 __raw_writel((__force u32)value->u32[1], membase + addr + 4); 126 __raw_writel((__force u32)value->u32[1], membase + addr + 4);
128#endif 127#endif
129 wmb();
130 mmiowb(); 128 mmiowb();
131 spin_unlock_irqrestore(&efx->biu_lock, flags); 129 spin_unlock_irqrestore(&efx->biu_lock, flags);
132} 130}
@@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
141 139
142 /* No lock required */ 140 /* No lock required */
143 _efx_writed(efx, value->u32[0], reg); 141 _efx_writed(efx, value->u32[0], reg);
144 wmb();
145} 142}
146 143
147/* Read a 128-bit CSR, locking as appropriate. */ 144/* Read a 128-bit CSR, locking as appropriate. */
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
152 149
153 spin_lock_irqsave(&efx->biu_lock, flags); 150 spin_lock_irqsave(&efx->biu_lock, flags);
154 value->u32[0] = _efx_readd(efx, reg + 0); 151 value->u32[0] = _efx_readd(efx, reg + 0);
155 rmb();
156 value->u32[1] = _efx_readd(efx, reg + 4); 152 value->u32[1] = _efx_readd(efx, reg + 4);
157 value->u32[2] = _efx_readd(efx, reg + 8); 153 value->u32[2] = _efx_readd(efx, reg + 8);
158 value->u32[3] = _efx_readd(efx, reg + 12); 154 value->u32[3] = _efx_readd(efx, reg + 12);
@@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
175 value->u64[0] = (__force __le64)__raw_readq(membase + addr); 171 value->u64[0] = (__force __le64)__raw_readq(membase + addr);
176#else 172#else
177 value->u32[0] = (__force __le32)__raw_readl(membase + addr); 173 value->u32[0] = (__force __le32)__raw_readl(membase + addr);
178 rmb();
179 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); 174 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
180#endif 175#endif
181 spin_unlock_irqrestore(&efx->biu_lock, flags); 176 spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
249 _efx_writed(efx, value->u32[2], reg + 8); 244 _efx_writed(efx, value->u32[2], reg + 8);
250 _efx_writed(efx, value->u32[3], reg + 12); 245 _efx_writed(efx, value->u32[3], reg + 12);
251#endif 246#endif
252 wmb();
253} 247}
254#define efx_writeo_page(efx, value, reg, page) \ 248#define efx_writeo_page(efx, value, reg, page) \
255 _efx_writeo_page(efx, value, \ 249 _efx_writeo_page(efx, value, \
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 3dd45ed61f0a..81a425397468 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
50 return &nic_data->mcdi; 50 return &nic_data->mcdi;
51} 51}
52 52
53static inline void
54efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
55{
56 struct siena_nic_data *nic_data = efx->nic_data;
57 value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
58}
59
60static inline void
61efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
62{
63 struct siena_nic_data *nic_data = efx->nic_data;
64 __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
65}
66
67void efx_mcdi_init(struct efx_nic *efx) 53void efx_mcdi_init(struct efx_nic *efx)
68{ 54{
69 struct efx_mcdi_iface *mcdi; 55 struct efx_mcdi_iface *mcdi;
@@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
84 const u8 *inbuf, size_t inlen) 70 const u8 *inbuf, size_t inlen)
85{ 71{
86 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
87 unsigned pdu = MCDI_PDU(efx); 73 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
88 unsigned doorbell = MCDI_DOORBELL(efx); 74 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
89 unsigned int i; 75 unsigned int i;
90 efx_dword_t hdr; 76 efx_dword_t hdr;
91 u32 xflags, seqno; 77 u32 xflags, seqno;
@@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
106 MCDI_HEADER_SEQ, seqno, 92 MCDI_HEADER_SEQ, seqno,
107 MCDI_HEADER_XFLAGS, xflags); 93 MCDI_HEADER_XFLAGS, xflags);
108 94
109 efx_mcdi_writed(efx, &hdr, pdu); 95 efx_writed(efx, &hdr, pdu);
110 96
111 for (i = 0; i < inlen; i += 4) 97 for (i = 0; i < inlen; i += 4)
112 efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), 98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
113 pdu + 4 + i); 99
100 /* Ensure the payload is written out before the header */
101 wmb();
114 102
115 /* ring the doorbell with a distinctive value */ 103 /* ring the doorbell with a distinctive value */
116 EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); 104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
117 efx_mcdi_writed(efx, &hdr, doorbell);
118} 105}
119 106
120static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 107static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
121{ 108{
122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 109 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
123 unsigned int pdu = MCDI_PDU(efx); 110 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
124 int i; 111 int i;
125 112
126 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
127 BUG_ON(outlen & 3 || outlen >= 0x100); 114 BUG_ON(outlen & 3 || outlen >= 0x100);
128 115
129 for (i = 0; i < outlen; i += 4) 116 for (i = 0; i < outlen; i += 4)
130 efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); 117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
131} 118}
132 119
133static int efx_mcdi_poll(struct efx_nic *efx) 120static int efx_mcdi_poll(struct efx_nic *efx)
@@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
135 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
136 unsigned int time, finish; 123 unsigned int time, finish;
137 unsigned int respseq, respcmd, error; 124 unsigned int respseq, respcmd, error;
138 unsigned int pdu = MCDI_PDU(efx); 125 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
139 unsigned int rc, spins; 126 unsigned int rc, spins;
140 efx_dword_t reg; 127 efx_dword_t reg;
141 128
@@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx)
161 148
162 time = get_seconds(); 149 time = get_seconds();
163 150
164 efx_mcdi_readd(efx, &reg, pdu); 151 rmb();
152 efx_readd(efx, &reg, pdu);
165 153
166 /* All 1's indicates that shared memory is in reset (and is 154 /* All 1's indicates that shared memory is in reset (and is
167 * not a valid header). Wait for it to come out reset before 155 * not a valid header). Wait for it to come out reset before
@@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
188 respseq, mcdi->seqno); 176 respseq, mcdi->seqno);
189 rc = EIO; 177 rc = EIO;
190 } else if (error) { 178 } else if (error) {
191 efx_mcdi_readd(efx, &reg, pdu + 4); 179 efx_readd(efx, &reg, pdu + 4);
192 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { 180 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
193#define TRANSLATE_ERROR(name) \ 181#define TRANSLATE_ERROR(name) \
194 case MC_CMD_ERR_ ## name: \ 182 case MC_CMD_ERR_ ## name: \
@@ -222,21 +210,21 @@ out:
222/* Test and clear MC-rebooted flag for this port/function */ 210/* Test and clear MC-rebooted flag for this port/function */
223int efx_mcdi_poll_reboot(struct efx_nic *efx) 211int efx_mcdi_poll_reboot(struct efx_nic *efx)
224{ 212{
225 unsigned int addr = MCDI_REBOOT_FLAG(efx); 213 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
226 efx_dword_t reg; 214 efx_dword_t reg;
227 uint32_t value; 215 uint32_t value;
228 216
229 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 217 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
230 return false; 218 return false;
231 219
232 efx_mcdi_readd(efx, &reg, addr); 220 efx_readd(efx, &reg, addr);
233 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); 221 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
234 222
235 if (value == 0) 223 if (value == 0)
236 return 0; 224 return 0;
237 225
238 EFX_ZERO_DWORD(reg); 226 EFX_ZERO_DWORD(reg);
239 efx_mcdi_writed(efx, &reg, addr); 227 efx_writed(efx, &reg, addr);
240 228
241 if (value == MC_STATUS_DWORD_ASSERT) 229 if (value == MC_STATUS_DWORD_ASSERT)
242 return -EINTR; 230 return -EINTR;
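
A user-space analogy for the ordering rule the wmb() above enforces: the MCDI request payload must be visible before the doorbell write that tells the MC to read it. Ordinary memory and a C11 release store stand in for the MMIO writes and the barrier; this illustrates the ordering only, not the MCDI register layout.

/* Sketch only: publish-payload-then-ring-doorbell ordering in plain memory. */
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

static uint32_t shared_pdu[16];		/* stands in for the PDU window   */
static atomic_uint doorbell;		/* stands in for the doorbell reg */

static void post_request(const void *buf, size_t len)
{
	memcpy(shared_pdu, buf, len);	/* payload first ...              */

	/* ... then the "doorbell": the release store keeps the payload
	 * writes from being reordered after it, like wmb() before the
	 * MMIO doorbell write in the hunk above. */
	atomic_store_explicit(&doorbell, 0x45789abc, memory_order_release);
}

int main(void)
{
	uint32_t req[2] = { 0x1, 0x2 };

	post_request(req, sizeof(req));
	return 0;
}
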
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index bafa23a6874c..3edfbaf5f022 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
1936 1936
1937 size = min_t(size_t, table->step, 16); 1937 size = min_t(size_t, table->step, 16);
1938 1938
1939 if (table->offset >= efx->type->mem_map_size) {
1940 /* No longer mapped; return dummy data */
1941 memcpy(buf, "\xde\xc0\xad\xde", 4);
1942 buf += table->rows * size;
1943 continue;
1944 }
1945
1946 for (i = 0; i < table->rows; i++) { 1939 for (i = 0; i < table->rows; i++) {
1947 switch (table->step) { 1940 switch (table->step) {
1948 case 4: /* 32-bit register or SRAM */ 1941 case 4: /* 32-bit register or SRAM */
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 4bd1f2839dfe..7443f99c977f 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
143/** 143/**
144 * struct siena_nic_data - Siena NIC state 144 * struct siena_nic_data - Siena NIC state
145 * @mcdi: Management-Controller-to-Driver Interface 145 * @mcdi: Management-Controller-to-Driver Interface
146 * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
147 * @wol_filter_id: Wake-on-LAN packet filter id 146 * @wol_filter_id: Wake-on-LAN packet filter id
148 */ 147 */
149struct siena_nic_data { 148struct siena_nic_data {
150 struct efx_mcdi_iface mcdi; 149 struct efx_mcdi_iface mcdi;
151 void __iomem *mcdi_smem;
152 int wol_filter_id; 150 int wol_filter_id;
153}; 151};
154 152
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 5735e84c69de..2c3bd93fab54 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -250,26 +250,12 @@ static int siena_probe_nic(struct efx_nic *efx)
250 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 250 efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
251 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 251 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
252 252
253 /* Initialise MCDI */
254 nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
255 FR_CZ_MC_TREG_SMEM,
256 FR_CZ_MC_TREG_SMEM_STEP *
257 FR_CZ_MC_TREG_SMEM_ROWS);
258 if (!nic_data->mcdi_smem) {
259 netif_err(efx, probe, efx->net_dev,
260 "could not map MCDI at %llx+%x\n",
261 (unsigned long long)efx->membase_phys +
262 FR_CZ_MC_TREG_SMEM,
263 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
264 rc = -ENOMEM;
265 goto fail1;
266 }
267 efx_mcdi_init(efx); 253 efx_mcdi_init(efx);
268 254
269 /* Recover from a failed assertion before probing */ 255 /* Recover from a failed assertion before probing */
270 rc = efx_mcdi_handle_assertion(efx); 256 rc = efx_mcdi_handle_assertion(efx);
271 if (rc) 257 if (rc)
272 goto fail2; 258 goto fail1;
273 259
274 /* Let the BMC know that the driver is now in charge of link and 260 /* Let the BMC know that the driver is now in charge of link and
275 * filter settings. We must do this before we reset the NIC */ 261 * filter settings. We must do this before we reset the NIC */
@@ -324,7 +310,6 @@ fail4:
324fail3: 310fail3:
325 efx_mcdi_drv_attach(efx, false, NULL); 311 efx_mcdi_drv_attach(efx, false, NULL);
326fail2: 312fail2:
327 iounmap(nic_data->mcdi_smem);
328fail1: 313fail1:
329 kfree(efx->nic_data); 314 kfree(efx->nic_data);
330 return rc; 315 return rc;
@@ -404,8 +389,6 @@ static int siena_init_nic(struct efx_nic *efx)
404 389
405static void siena_remove_nic(struct efx_nic *efx) 390static void siena_remove_nic(struct efx_nic *efx)
406{ 391{
407 struct siena_nic_data *nic_data = efx->nic_data;
408
409 efx_nic_free_buffer(efx, &efx->irq_status); 392 efx_nic_free_buffer(efx, &efx->irq_status);
410 393
411 siena_reset_hw(efx, RESET_TYPE_ALL); 394 siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -415,8 +398,7 @@ static void siena_remove_nic(struct efx_nic *efx)
415 efx_mcdi_drv_attach(efx, false, NULL); 398 efx_mcdi_drv_attach(efx, false, NULL);
416 399
417 /* Tear down the private nic state */ 400 /* Tear down the private nic state */
418 iounmap(nic_data->mcdi_smem); 401 kfree(efx->nic_data);
419 kfree(nic_data);
420 efx->nic_data = NULL; 402 efx->nic_data = NULL;
421} 403}
422 404
@@ -656,7 +638,8 @@ const struct efx_nic_type siena_a0_nic_type = {
656 .default_mac_ops = &efx_mcdi_mac_operations, 638 .default_mac_ops = &efx_mcdi_mac_operations,
657 639
658 .revision = EFX_REV_SIENA_A0, 640 .revision = EFX_REV_SIENA_A0,
659 .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ 641 .mem_map_size = (FR_CZ_MC_TREG_SMEM +
642 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
660 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, 643 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
661 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, 644 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
662 .buf_tbl_base = FR_BZ_BUF_FULL_TBL, 645 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 99ff11400cef..e4dd3a7f304b 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -38,8 +38,6 @@
38#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS 38#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
39/* Legacy interrupt storm when interrupt fifo fills */ 39/* Legacy interrupt storm when interrupt fifo fills */
40#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 40#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
41/* Write combining and sriov=enabled are incompatible */
42#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
43 41
44/* Spurious parity errors in TSORT buffers */ 42/* Spurious parity errors in TSORT buffers */
45#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A 43#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index ad35c210b839..1c1666e99106 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/interrupt.h>
24#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
25#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
26#include <linux/delay.h> 27#include <linux/delay.h>
@@ -30,6 +31,7 @@
30#include <linux/phy.h> 31#include <linux/phy.h>
31#include <linux/cache.h> 32#include <linux/cache.h>
32#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/interrupt.h>
33#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
34#include <linux/slab.h> 36#include <linux/slab.h>
35#include <linux/ethtool.h> 37#include <linux/ethtool.h>
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index f11b3f3df24f..4c617534f937 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -367,7 +367,7 @@ static void sl_bump(struct slip *sl)
367 memcpy(skb_put(skb, count), sl->rbuff, count); 367 memcpy(skb_put(skb, count), sl->rbuff, count);
368 skb_reset_mac_header(skb); 368 skb_reset_mac_header(skb);
369 skb->protocol = htons(ETH_P_IP); 369 skb->protocol = htons(ETH_P_IP);
370 netif_rx(skb); 370 netif_rx_ni(skb);
371 dev->stats.rx_packets++; 371 dev->stats.rx_packets++;
372} 372}
373 373
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index dc3fbf61910b..4a1374df6084 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -6234,12 +6234,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6234 } 6234 }
6235 } 6235 }
6236 6236
6237#ifdef BCM_KERNEL_SUPPORTS_8021Q
6238 if (vlan_tx_tag_present(skb)) { 6237 if (vlan_tx_tag_present(skb)) {
6239 base_flags |= TXD_FLAG_VLAN; 6238 base_flags |= TXD_FLAG_VLAN;
6240 vlan = vlan_tx_tag_get(skb); 6239 vlan = vlan_tx_tag_get(skb);
6241 } 6240 }
6242#endif
6243 6241
6244 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 6242 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6245 !mss && skb->len > VLAN_ETH_FRAME_LEN) 6243 !mss && skb->len > VLAN_ETH_FRAME_LEN)
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index a03336e086d5..f06fb78383a1 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -228,23 +228,40 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
228 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { 228 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
229 229
230 if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { 230 if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
231 struct usb_cdc_ncm_ndp_input_size ndp_in_sz; 231 struct usb_cdc_ncm_ndp_input_size *ndp_in_sz;
232
233 ndp_in_sz = kzalloc(sizeof(*ndp_in_sz), GFP_KERNEL);
234 if (!ndp_in_sz) {
235 err = -ENOMEM;
236 goto size_err;
237 }
238
232 err = usb_control_msg(ctx->udev, 239 err = usb_control_msg(ctx->udev,
233 usb_sndctrlpipe(ctx->udev, 0), 240 usb_sndctrlpipe(ctx->udev, 0),
234 USB_CDC_SET_NTB_INPUT_SIZE, 241 USB_CDC_SET_NTB_INPUT_SIZE,
235 USB_TYPE_CLASS | USB_DIR_OUT 242 USB_TYPE_CLASS | USB_DIR_OUT
236 | USB_RECIP_INTERFACE, 243 | USB_RECIP_INTERFACE,
237 0, iface_no, &ndp_in_sz, 8, 1000); 244 0, iface_no, ndp_in_sz, 8, 1000);
245 kfree(ndp_in_sz);
238 } else { 246 } else {
239 __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); 247 __le32 *dwNtbInMaxSize;
248 dwNtbInMaxSize = kzalloc(sizeof(*dwNtbInMaxSize),
249 GFP_KERNEL);
250 if (!dwNtbInMaxSize) {
251 err = -ENOMEM;
252 goto size_err;
253 }
254 *dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
255
240 err = usb_control_msg(ctx->udev, 256 err = usb_control_msg(ctx->udev,
241 usb_sndctrlpipe(ctx->udev, 0), 257 usb_sndctrlpipe(ctx->udev, 0),
242 USB_CDC_SET_NTB_INPUT_SIZE, 258 USB_CDC_SET_NTB_INPUT_SIZE,
243 USB_TYPE_CLASS | USB_DIR_OUT 259 USB_TYPE_CLASS | USB_DIR_OUT
244 | USB_RECIP_INTERFACE, 260 | USB_RECIP_INTERFACE,
245 0, iface_no, &dwNtbInMaxSize, 4, 1000); 261 0, iface_no, dwNtbInMaxSize, 4, 1000);
262 kfree(dwNtbInMaxSize);
246 } 263 }
247 264size_err:
248 if (err < 0) 265 if (err < 0)
249 pr_debug("Setting NTB Input Size failed\n"); 266 pr_debug("Setting NTB Input Size failed\n");
250 } 267 }
@@ -325,19 +342,29 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
325 342
326 /* set Max Datagram Size (MTU) */ 343 /* set Max Datagram Size (MTU) */
327 if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { 344 if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
328 __le16 max_datagram_size; 345 __le16 *max_datagram_size;
329 u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); 346 u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
347
348 max_datagram_size = kzalloc(sizeof(*max_datagram_size),
349 GFP_KERNEL);
350 if (!max_datagram_size) {
351 err = -ENOMEM;
352 goto max_dgram_err;
353 }
354
330 err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0), 355 err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0),
331 USB_CDC_GET_MAX_DATAGRAM_SIZE, 356 USB_CDC_GET_MAX_DATAGRAM_SIZE,
332 USB_TYPE_CLASS | USB_DIR_IN 357 USB_TYPE_CLASS | USB_DIR_IN
333 | USB_RECIP_INTERFACE, 358 | USB_RECIP_INTERFACE,
334 0, iface_no, &max_datagram_size, 359 0, iface_no, max_datagram_size,
335 2, 1000); 360 2, 1000);
336 if (err < 0) { 361 if (err < 0) {
337 pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", 362 pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
338 CDC_NCM_MIN_DATAGRAM_SIZE); 363 CDC_NCM_MIN_DATAGRAM_SIZE);
364 kfree(max_datagram_size);
339 } else { 365 } else {
340 ctx->max_datagram_size = le16_to_cpu(max_datagram_size); 366 ctx->max_datagram_size =
367 le16_to_cpu(*max_datagram_size);
341 /* Check Eth descriptor value */ 368 /* Check Eth descriptor value */
342 if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) { 369 if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
343 if (ctx->max_datagram_size > eth_max_sz) 370 if (ctx->max_datagram_size > eth_max_sz)
@@ -360,8 +387,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
360 USB_TYPE_CLASS | USB_DIR_OUT 387 USB_TYPE_CLASS | USB_DIR_OUT
361 | USB_RECIP_INTERFACE, 388 | USB_RECIP_INTERFACE,
362 0, 389 0,
363 iface_no, &max_datagram_size, 390 iface_no, max_datagram_size,
364 2, 1000); 391 2, 1000);
392 kfree(max_datagram_size);
393max_dgram_err:
365 if (err < 0) 394 if (err < 0)
366 pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); 395 pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
367 } 396 }
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 15772b1b6a91..13c1f044b40d 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,7 @@
59#define USB_PRODUCT_IPHONE_3G 0x1292 59#define USB_PRODUCT_IPHONE_3G 0x1292
60#define USB_PRODUCT_IPHONE_3GS 0x1294 60#define USB_PRODUCT_IPHONE_3GS 0x1294
61#define USB_PRODUCT_IPHONE_4 0x1297 61#define USB_PRODUCT_IPHONE_4 0x1297
62#define USB_PRODUCT_IPHONE_4_VZW 0x129c
62 63
63#define IPHETH_USBINTF_CLASS 255 64#define IPHETH_USBINTF_CLASS 255
64#define IPHETH_USBINTF_SUBCLASS 253 65#define IPHETH_USBINTF_SUBCLASS 253
@@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = {
98 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, 99 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
99 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 100 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
100 IPHETH_USBINTF_PROTO) }, 101 IPHETH_USBINTF_PROTO) },
102 { USB_DEVICE_AND_INTERFACE_INFO(
103 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
104 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
105 IPHETH_USBINTF_PROTO) },
101 { } 106 { }
102}; 107};
103MODULE_DEVICE_TABLE(usb, ipheth_table); 108MODULE_DEVICE_TABLE(usb, ipheth_table);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 041fb7d43c4f..ef3b236b5145 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -977,7 +977,6 @@ static void rtl8150_disconnect(struct usb_interface *intf)
977 usb_set_intfdata(intf, NULL); 977 usb_set_intfdata(intf, NULL);
978 if (dev) { 978 if (dev) {
979 set_bit(RTL8150_UNPLUG, &dev->flags); 979 set_bit(RTL8150_UNPLUG, &dev->flags);
980 tasklet_disable(&dev->tl);
981 tasklet_kill(&dev->tl); 980 tasklet_kill(&dev->tl);
982 unregister_netdev(dev->netdev); 981 unregister_netdev(dev->netdev);
983 unlink_all_urbs(dev); 982 unlink_all_urbs(dev);
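
The rtl8150 change drops tasklet_disable() from the disconnect path: tasklet_kill() already waits for a queued run to finish and then prevents rescheduling, whereas a tasklet that was disabled first can never run, so the kill could wait indefinitely. A hedged sketch of the teardown order, using a hypothetical private structure rather than the driver's own:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_priv {                       /* hypothetical driver context */
        struct tasklet_struct tl;
        struct net_device *netdev;
};

static void foo_disconnect(struct foo_priv *priv)
{
        /* Let any queued run complete, then forbid further scheduling. */
        tasklet_kill(&priv->tl);
        /* Only afterwards tear down the interfaces the tasklet used. */
        unregister_netdev(priv->netdev);
}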
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index deb1eca13c9f..7c5336c5c37f 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -515,10 +515,6 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
515 mac_set_cam_mask(regs, vptr->mCAMmask); 515 mac_set_cam_mask(regs, vptr->mCAMmask);
516 516
517 /* Enable VCAMs */ 517 /* Enable VCAMs */
518
519 if (test_bit(0, vptr->active_vlans))
520 WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
521
522 for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) { 518 for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
523 mac_set_vlan_cam(regs, i, (u8 *) &vid); 519 mac_set_vlan_cam(regs, i, (u8 *) &vid);
524 vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); 520 vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 1cbacb389652..0959583feb27 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1929,14 +1929,17 @@ static void
1929vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1929vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1930{ 1930{
1931 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1931 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1932 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1933 unsigned long flags;
1934 1932
1935 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); 1933 if (!(netdev->flags & IFF_PROMISC)) {
1936 spin_lock_irqsave(&adapter->cmd_lock, flags); 1934 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1937 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1935 unsigned long flags;
1938 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1936
1939 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 1937 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1938 spin_lock_irqsave(&adapter->cmd_lock, flags);
1939 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1940 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1941 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1942 }
1940 1943
1941 set_bit(vid, adapter->active_vlans); 1944 set_bit(vid, adapter->active_vlans);
1942} 1945}
@@ -1946,14 +1949,17 @@ static void
1946vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1949vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1947{ 1950{
1948 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1951 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1949 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1950 unsigned long flags;
1951 1952
1952 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); 1953 if (!(netdev->flags & IFF_PROMISC)) {
1953 spin_lock_irqsave(&adapter->cmd_lock, flags); 1954 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1954 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1955 unsigned long flags;
1955 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1956
1956 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 1957 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1958 spin_lock_irqsave(&adapter->cmd_lock, flags);
1959 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1960 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1961 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1962 }
1957 1963
1958 clear_bit(vid, adapter->active_vlans); 1964 clear_bit(vid, adapter->active_vlans);
1959} 1965}
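
Both vmxnet3 hunks wrap the VLAN-filter update in a check of netdev->flags & IFF_PROMISC, so adding or removing a VID while the device is promiscuous only records the VID in active_vlans and leaves the hardware filter, which promiscuous mode bypasses, untouched. A simplified sketch of the guard, with a hypothetical adapter structure in place of the driver's real register interface:

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

struct foo_adapter {                    /* hypothetical adapter context */
        spinlock_t cmd_lock;
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

static void foo_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
        struct foo_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;

        if (!(netdev->flags & IFF_PROMISC)) {
                /* Only touch the hardware filter when it is actually in use. */
                spin_lock_irqsave(&adapter->cmd_lock, flags);
                /* ... program the device's VLAN filter table here ... */
                spin_unlock_irqrestore(&adapter->cmd_lock, flags);
        }

        set_bit(vid, adapter->active_vlans);    /* always track the VID */
}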
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index f54dff44ed50..c3119a6caace 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1735,6 +1735,8 @@ ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
1735 1735
1736 if (dma_mapping_error(ah->dev, bf->skbaddr)) { 1736 if (dma_mapping_error(ah->dev, bf->skbaddr)) {
1737 ATH5K_ERR(ah, "beacon DMA mapping failed\n"); 1737 ATH5K_ERR(ah, "beacon DMA mapping failed\n");
1738 dev_kfree_skb_any(skb);
1739 bf->skb = NULL;
1738 return -EIO; 1740 return -EIO;
1739 } 1741 }
1740 1742
@@ -1819,8 +1821,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1819 ath5k_txbuf_free_skb(ah, avf->bbuf); 1821 ath5k_txbuf_free_skb(ah, avf->bbuf);
1820 avf->bbuf->skb = skb; 1822 avf->bbuf->skb = skb;
1821 ret = ath5k_beacon_setup(ah, avf->bbuf); 1823 ret = ath5k_beacon_setup(ah, avf->bbuf);
1822 if (ret)
1823 avf->bbuf->skb = NULL;
1824out: 1824out:
1825 return ret; 1825 return ret;
1826} 1826}
@@ -1840,6 +1840,7 @@ ath5k_beacon_send(struct ath5k_hw *ah)
1840 struct ath5k_vif *avf; 1840 struct ath5k_vif *avf;
1841 struct ath5k_buf *bf; 1841 struct ath5k_buf *bf;
1842 struct sk_buff *skb; 1842 struct sk_buff *skb;
1843 int err;
1843 1844
1844 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n"); 1845 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");
1845 1846
@@ -1888,11 +1889,6 @@ ath5k_beacon_send(struct ath5k_hw *ah)
1888 1889
1889 avf = (void *)vif->drv_priv; 1890 avf = (void *)vif->drv_priv;
1890 bf = avf->bbuf; 1891 bf = avf->bbuf;
1891 if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
1892 ah->opmode == NL80211_IFTYPE_MONITOR)) {
1893 ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
1894 return;
1895 }
1896 1892
1897 /* 1893 /*
1898 * Stop any current dma and put the new frame on the queue. 1894 * Stop any current dma and put the new frame on the queue.
@@ -1906,8 +1902,17 @@ ath5k_beacon_send(struct ath5k_hw *ah)
1906 1902
1907 /* refresh the beacon for AP or MESH mode */ 1903 /* refresh the beacon for AP or MESH mode */
1908 if (ah->opmode == NL80211_IFTYPE_AP || 1904 if (ah->opmode == NL80211_IFTYPE_AP ||
1909 ah->opmode == NL80211_IFTYPE_MESH_POINT) 1905 ah->opmode == NL80211_IFTYPE_MESH_POINT) {
1910 ath5k_beacon_update(ah->hw, vif); 1906 err = ath5k_beacon_update(ah->hw, vif);
1907 if (err)
1908 return;
1909 }
1910
1911 if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
1912 ah->opmode == NL80211_IFTYPE_MONITOR)) {
1913 ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
1914 return;
1915 }
1911 1916
1912 trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]); 1917 trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);
1913 1918
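
The first ath5k hunk makes beacon setup clean up after itself when the DMA mapping fails: the skb is freed and the buffer's skb pointer cleared at the point of failure instead of relying on the caller. A minimal sketch of that error path, assuming a hypothetical buffer descriptor:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct foo_buf {                        /* hypothetical DMA buffer descriptor */
        struct sk_buff *skb;
        dma_addr_t skbaddr;
};

static int foo_map_beacon(struct device *dev, struct foo_buf *bf)
{
        bf->skbaddr = dma_map_single(dev, bf->skb->data, bf->skb->len,
                                     DMA_TO_DEVICE);
        if (dma_mapping_error(dev, bf->skbaddr)) {
                /* Free and forget the skb so no caller sees a stale pointer. */
                dev_kfree_skb_any(bf->skb);
                bf->skb = NULL;
                return -EIO;
        }
        return 0;
}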
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 2d4c0910295b..2d394af82171 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
41 case ADC_DC_CAL: 41 case ADC_DC_CAL:
42 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */ 42 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
43 if (!IS_CHAN_B(chan) && 43 if (!IS_CHAN_B(chan) &&
44 !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) 44 !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
45 IS_CHAN_HT20(chan)))
45 supported = true; 46 supported = true;
46 break; 47 break;
47 } 48 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 2339728a7306..3e69c631ebb4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -1514,7 +1514,7 @@ static const u32 ar9300_2p2_mac_core[][2] = {
1514 {0x00008258, 0x00000000}, 1514 {0x00008258, 0x00000000},
1515 {0x0000825c, 0x40000000}, 1515 {0x0000825c, 0x40000000},
1516 {0x00008260, 0x00080922}, 1516 {0x00008260, 0x00080922},
1517 {0x00008264, 0x9bc00010}, 1517 {0x00008264, 0x9d400010},
1518 {0x00008268, 0xffffffff}, 1518 {0x00008268, 0xffffffff},
1519 {0x0000826c, 0x0000ffff}, 1519 {0x0000826c, 0x0000ffff},
1520 {0x00008270, 0x00000000}, 1520 {0x00008270, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index d109c25417f4..1b9400371eaf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -69,7 +69,7 @@ static int ar9003_hw_power_interpolate(int32_t x,
69static const struct ar9300_eeprom ar9300_default = { 69static const struct ar9300_eeprom ar9300_default = {
70 .eepromVersion = 2, 70 .eepromVersion = 2,
71 .templateVersion = 2, 71 .templateVersion = 2,
72 .macAddr = {1, 2, 3, 4, 5, 6}, 72 .macAddr = {0, 2, 3, 4, 5, 6},
73 .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 73 .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
74 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 74 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
75 .baseEepHeader = { 75 .baseEepHeader = {
@@ -307,7 +307,7 @@ static const struct ar9300_eeprom ar9300_default = {
307 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 307 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
308 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, 308 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
309 309
310 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, 310 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
311 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 311 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
312 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 312 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
313 313
@@ -884,7 +884,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
884 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 884 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
885 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, 885 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
886 886
887 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, 887 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
888 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 888 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
889 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 889 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
890 890
@@ -2040,7 +2040,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
2040 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 2040 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2041 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, 2041 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
2042 2042
2043 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, 2043 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
2044 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 2044 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2045 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 2045 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2046 2046
@@ -3734,7 +3734,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
3734 } 3734 }
3735 } else { 3735 } else {
3736 reg_pmu_set = (5 << 1) | (7 << 4) | 3736 reg_pmu_set = (5 << 1) | (7 << 4) |
3737 (1 << 8) | (2 << 14) | 3737 (2 << 8) | (2 << 14) |
3738 (6 << 17) | (1 << 20) | 3738 (6 << 17) | (1 << 20) |
3739 (3 << 24) | (1 << 28); 3739 (3 << 24) | (1 << 28);
3740 } 3740 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 1baca8e4715d..fcafec0605f4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -671,7 +671,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
671 REG_WRITE_ARRAY(&ah->iniModesAdditional, 671 REG_WRITE_ARRAY(&ah->iniModesAdditional,
672 modesIndex, regWrites); 672 modesIndex, regWrites);
673 673
674 if (AR_SREV_9300(ah)) 674 if (AR_SREV_9330(ah))
675 REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites); 675 REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);
676 676
677 if (AR_SREV_9340(ah) && !ah->is_clk_25mhz) 677 if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 6de3f0bc18e6..5c590429f120 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -850,7 +850,7 @@
850#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) 850#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
851#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240) 851#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
852#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c) 852#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
853#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM_BASE + 0x450 + ((_i) << 2)) 853#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2))
854 854
855/* 855/*
856 * Channel 2 Register Map 856 * Channel 2 Register Map
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9098aaad97a9..722967b86cf1 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2283,7 +2283,11 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
2283 2283
2284 mutex_lock(&sc->mutex); 2284 mutex_lock(&sc->mutex);
2285 ah->coverage_class = coverage_class; 2285 ah->coverage_class = coverage_class;
2286
2287 ath9k_ps_wakeup(sc);
2286 ath9k_hw_init_global_settings(ah); 2288 ath9k_hw_init_global_settings(ah);
2289 ath9k_ps_restore(sc);
2290
2287 mutex_unlock(&sc->mutex); 2291 mutex_unlock(&sc->mutex);
2288} 2292}
2289 2293
@@ -2299,6 +2303,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2299 mutex_lock(&sc->mutex); 2303 mutex_lock(&sc->mutex);
2300 cancel_delayed_work_sync(&sc->tx_complete_work); 2304 cancel_delayed_work_sync(&sc->tx_complete_work);
2301 2305
2306 if (ah->ah_flags & AH_UNPLUGGED) {
2307 ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
2308 mutex_unlock(&sc->mutex);
2309 return;
2310 }
2311
2302 if (sc->sc_flags & SC_OP_INVALID) { 2312 if (sc->sc_flags & SC_OP_INVALID) {
2303 ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); 2313 ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
2304 mutex_unlock(&sc->mutex); 2314 mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 9a4850154fb2..4c21f8cbdeb5 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -205,14 +205,22 @@ static void ath_rx_remove_buffer(struct ath_softc *sc,
205 205
206static void ath_rx_edma_cleanup(struct ath_softc *sc) 206static void ath_rx_edma_cleanup(struct ath_softc *sc)
207{ 207{
208 struct ath_hw *ah = sc->sc_ah;
209 struct ath_common *common = ath9k_hw_common(ah);
208 struct ath_buf *bf; 210 struct ath_buf *bf;
209 211
210 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); 212 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
211 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); 213 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
212 214
213 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 215 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
214 if (bf->bf_mpdu) 216 if (bf->bf_mpdu) {
217 dma_unmap_single(sc->dev, bf->bf_buf_addr,
218 common->rx_bufsize,
219 DMA_BIDIRECTIONAL);
215 dev_kfree_skb_any(bf->bf_mpdu); 220 dev_kfree_skb_any(bf->bf_mpdu);
221 bf->bf_buf_addr = 0;
222 bf->bf_mpdu = NULL;
223 }
216 } 224 }
217 225
218 INIT_LIST_HEAD(&sc->rx.rxbuf); 226 INIT_LIST_HEAD(&sc->rx.rxbuf);
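
The recv.c hunk pairs the dev_kfree_skb_any() in the EDMA cleanup with a dma_unmap_single() and clears both the address and the skb pointer, so the buffer cannot be unmapped or freed twice later. The general teardown order, sketched with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct foo_rxbuf {                      /* hypothetical RX buffer descriptor */
        struct sk_buff *skb;
        dma_addr_t addr;
};

static void foo_free_rxbuf(struct device *dev, struct foo_rxbuf *bf,
                           size_t bufsize)
{
        if (!bf->skb)
                return;
        /* Undo the mapping before the memory backing it goes away. */
        dma_unmap_single(dev, bf->addr, bufsize, DMA_BIDIRECTIONAL);
        dev_kfree_skb_any(bf->skb);
        bf->addr = 0;
        bf->skb = NULL;
}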
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 0122930b14c7..0474e6638d21 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1066,8 +1066,10 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1066 * the high througput speed in 802.11n networks. 1066 * the high througput speed in 802.11n networks.
1067 */ 1067 */
1068 1068
1069 if (!is_main_vif(ar, vif)) 1069 if (!is_main_vif(ar, vif)) {
1070 mutex_lock(&ar->mutex);
1070 goto err_softw; 1071 goto err_softw;
1072 }
1071 1073
1072 /* 1074 /*
1073 * While the hardware supports *catch-all* key, for offloading 1075 * While the hardware supports *catch-all* key, for offloading
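
The carl9170 fix takes ar->mutex before jumping to err_softw, because that label sits on an error path which ends with mutex_unlock(); every goto into such a path must enter it with the lock held. A small sketch of the invariant, with hypothetical names:

#include <linux/mutex.h>

static int foo_set_key(struct mutex *lock, bool fast_path_ok)
{
        int err = 0;

        if (!fast_path_ok) {
                /* The shared exit path below unlocks, so lock here first. */
                mutex_lock(lock);
                goto err_unlock;
        }

        mutex_lock(lock);
        /* ... normal work under the lock ... */

err_unlock:
        mutex_unlock(lock);
        return err;
}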
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 83cba22ac6e8..481e534534eb 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -795,9 +795,23 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
795 u32 tmp; 795 u32 tmp;
796 u16 mmio_base; 796 u16 mmio_base;
797 797
798 tmp = b43_read32(dev, SSB_TMSHIGH); 798 switch (dev->dev->bus_type) {
799 if (tmp & SSB_TMSHIGH_DMA64) 799#ifdef CONFIG_B43_BCMA
800 return DMA_BIT_MASK(64); 800 case B43_BUS_BCMA:
801 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
802 if (tmp & BCMA_IOST_DMA64)
803 return DMA_BIT_MASK(64);
804 break;
805#endif
806#ifdef CONFIG_B43_SSB
807 case B43_BUS_SSB:
808 tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
809 if (tmp & SSB_TMSHIGH_DMA64)
810 return DMA_BIT_MASK(64);
811 break;
812#endif
813 }
814
801 mmio_base = b43_dmacontroller_base(0, 0); 815 mmio_base = b43_dmacontroller_base(0, 0);
802 b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK); 816 b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
803 tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL); 817 tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 26f1ab840cc7..e293a7921bf0 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1632,7 +1632,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)
1632 u32 cmd, beacon0_valid, beacon1_valid; 1632 u32 cmd, beacon0_valid, beacon1_valid;
1633 1633
1634 if (!b43_is_mode(wl, NL80211_IFTYPE_AP) && 1634 if (!b43_is_mode(wl, NL80211_IFTYPE_AP) &&
1635 !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) 1635 !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) &&
1636 !b43_is_mode(wl, NL80211_IFTYPE_ADHOC))
1636 return; 1637 return;
1637 1638
1638 /* This is the bottom half of the asynchronous beacon update. */ 1639 /* This is the bottom half of the asynchronous beacon update. */
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 3774dd034746..ef9ad79d1bfd 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1903,15 +1903,17 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1903static int ipw2100_net_init(struct net_device *dev) 1903static int ipw2100_net_init(struct net_device *dev)
1904{ 1904{
1905 struct ipw2100_priv *priv = libipw_priv(dev); 1905 struct ipw2100_priv *priv = libipw_priv(dev);
1906
1907 return ipw2100_up(priv, 1);
1908}
1909
1910static int ipw2100_wdev_init(struct net_device *dev)
1911{
1912 struct ipw2100_priv *priv = libipw_priv(dev);
1906 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 1913 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1907 struct wireless_dev *wdev = &priv->ieee->wdev; 1914 struct wireless_dev *wdev = &priv->ieee->wdev;
1908 int ret;
1909 int i; 1915 int i;
1910 1916
1911 ret = ipw2100_up(priv, 1);
1912 if (ret)
1913 return ret;
1914
1915 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); 1917 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
1916 1918
1917 /* fill-out priv->ieee->bg_band */ 1919 /* fill-out priv->ieee->bg_band */
@@ -6350,9 +6352,13 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6350 "Error calling register_netdev.\n"); 6352 "Error calling register_netdev.\n");
6351 goto fail; 6353 goto fail;
6352 } 6354 }
6355 registered = 1;
6356
6357 err = ipw2100_wdev_init(dev);
6358 if (err)
6359 goto fail;
6353 6360
6354 mutex_lock(&priv->action_mutex); 6361 mutex_lock(&priv->action_mutex);
6355 registered = 1;
6356 6362
6357 IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); 6363 IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
6358 6364
@@ -6389,7 +6395,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6389 6395
6390 fail_unlock: 6396 fail_unlock:
6391 mutex_unlock(&priv->action_mutex); 6397 mutex_unlock(&priv->action_mutex);
6392 6398 wiphy_unregister(priv->ieee->wdev.wiphy);
6399 kfree(priv->ieee->bg_band.channels);
6393 fail: 6400 fail:
6394 if (dev) { 6401 if (dev) {
6395 if (registered) 6402 if (registered)
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 87813c33bdc2..4ffebede5e03 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11425,16 +11425,23 @@ static void ipw_bg_down(struct work_struct *work)
11425/* Called by register_netdev() */ 11425/* Called by register_netdev() */
11426static int ipw_net_init(struct net_device *dev) 11426static int ipw_net_init(struct net_device *dev)
11427{ 11427{
11428 int rc = 0;
11429 struct ipw_priv *priv = libipw_priv(dev);
11430
11431 mutex_lock(&priv->mutex);
11432 if (ipw_up(priv))
11433 rc = -EIO;
11434 mutex_unlock(&priv->mutex);
11435
11436 return rc;
11437}
11438
11439static int ipw_wdev_init(struct net_device *dev)
11440{
11428 int i, rc = 0; 11441 int i, rc = 0;
11429 struct ipw_priv *priv = libipw_priv(dev); 11442 struct ipw_priv *priv = libipw_priv(dev);
11430 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 11443 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11431 struct wireless_dev *wdev = &priv->ieee->wdev; 11444 struct wireless_dev *wdev = &priv->ieee->wdev;
11432 mutex_lock(&priv->mutex);
11433
11434 if (ipw_up(priv)) {
11435 rc = -EIO;
11436 goto out;
11437 }
11438 11445
11439 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); 11446 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11440 11447
@@ -11519,13 +11526,9 @@ static int ipw_net_init(struct net_device *dev)
11519 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); 11526 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11520 11527
11521 /* With that information in place, we can now register the wiphy... */ 11528 /* With that information in place, we can now register the wiphy... */
11522 if (wiphy_register(wdev->wiphy)) { 11529 if (wiphy_register(wdev->wiphy))
11523 rc = -EIO; 11530 rc = -EIO;
11524 goto out;
11525 }
11526
11527out: 11531out:
11528 mutex_unlock(&priv->mutex);
11529 return rc; 11532 return rc;
11530} 11533}
11531 11534
@@ -11832,14 +11835,22 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11832 goto out_remove_sysfs; 11835 goto out_remove_sysfs;
11833 } 11836 }
11834 11837
11838 err = ipw_wdev_init(net_dev);
11839 if (err) {
11840 IPW_ERROR("failed to register wireless device\n");
11841 goto out_unregister_netdev;
11842 }
11843
11835#ifdef CONFIG_IPW2200_PROMISCUOUS 11844#ifdef CONFIG_IPW2200_PROMISCUOUS
11836 if (rtap_iface) { 11845 if (rtap_iface) {
11837 err = ipw_prom_alloc(priv); 11846 err = ipw_prom_alloc(priv);
11838 if (err) { 11847 if (err) {
11839 IPW_ERROR("Failed to register promiscuous network " 11848 IPW_ERROR("Failed to register promiscuous network "
11840 "device (error %d).\n", err); 11849 "device (error %d).\n", err);
11841 unregister_netdev(priv->net_dev); 11850 wiphy_unregister(priv->ieee->wdev.wiphy);
11842 goto out_remove_sysfs; 11851 kfree(priv->ieee->a_band.channels);
11852 kfree(priv->ieee->bg_band.channels);
11853 goto out_unregister_netdev;
11843 } 11854 }
11844 } 11855 }
11845#endif 11856#endif
@@ -11851,6 +11862,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11851 11862
11852 return 0; 11863 return 0;
11853 11864
11865 out_unregister_netdev:
11866 unregister_netdev(priv->net_dev);
11854 out_remove_sysfs: 11867 out_remove_sysfs:
11855 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); 11868 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11856 out_release_irq: 11869 out_release_irq:
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 977bd2477c6a..164bcae821f8 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
822 822
823 out: 823 out:
824 824
825 rs_sta->last_txrate_idx = index; 825 if (sband->band == IEEE80211_BAND_5GHZ) {
826 if (sband->band == IEEE80211_BAND_5GHZ) 826 if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
827 info->control.rates[0].idx = rs_sta->last_txrate_idx - 827 index = IWL_FIRST_OFDM_RATE;
828 IWL_FIRST_OFDM_RATE; 828 rs_sta->last_txrate_idx = index;
829 else 829 info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
830 } else {
831 rs_sta->last_txrate_idx = index;
830 info->control.rates[0].idx = rs_sta->last_txrate_idx; 832 info->control.rates[0].idx = rs_sta->last_txrate_idx;
833 }
831 834
832 IWL_DEBUG_RATE(priv, "leave: %d\n", index); 835 IWL_DEBUG_RATE(priv, "leave: %d\n", index);
833} 836}
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 35cd2537e7fd..e5971fe9d169 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -937,7 +937,7 @@ void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
937 &priv->contexts[IWL_RXON_CTX_BSS]); 937 &priv->contexts[IWL_RXON_CTX_BSS]);
938#endif 938#endif
939 939
940 wake_up_interruptible(&priv->wait_command_queue); 940 wake_up(&priv->wait_command_queue);
941 941
942 /* Keep the restart process from trying to send host 942 /* Keep the restart process from trying to send host
943 * commands by clearing the INIT status bit */ 943 * commands by clearing the INIT status bit */
@@ -1746,7 +1746,7 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
1746 1746
1747 /* Set the FW error flag -- cleared on iwl_down */ 1747 /* Set the FW error flag -- cleared on iwl_down */
1748 set_bit(STATUS_FW_ERROR, &priv->status); 1748 set_bit(STATUS_FW_ERROR, &priv->status);
1749 wake_up_interruptible(&priv->wait_command_queue); 1749 wake_up(&priv->wait_command_queue);
1750 /* 1750 /*
1751 * Keep the restart process from trying to send host 1751 * Keep the restart process from trying to send host
1752 * commands by clearing the INIT status bit 1752 * commands by clearing the INIT status bit
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
index 62b4b09122cb..ce1fc9feb61f 100644
--- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
@@ -167,7 +167,7 @@ int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
167 goto out; 167 goto out;
168 } 168 }
169 169
170 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 170 ret = wait_event_timeout(priv->wait_command_queue,
171 !test_bit(STATUS_HCMD_ACTIVE, &priv->status), 171 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
172 HOST_COMPLETE_TIMEOUT); 172 HOST_COMPLETE_TIMEOUT);
173 if (!ret) { 173 if (!ret) {
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
index 4fff995c6f3e..ef9e268bf8a0 100644
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
@@ -625,6 +625,8 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
625 cmd = txq->cmd[cmd_index]; 625 cmd = txq->cmd[cmd_index];
626 meta = &txq->meta[cmd_index]; 626 meta = &txq->meta[cmd_index];
627 627
628 txq->time_stamp = jiffies;
629
628 pci_unmap_single(priv->pci_dev, 630 pci_unmap_single(priv->pci_dev,
629 dma_unmap_addr(meta, mapping), 631 dma_unmap_addr(meta, mapping),
630 dma_unmap_len(meta, len), 632 dma_unmap_len(meta, len),
@@ -645,7 +647,7 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
645 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 647 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
646 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", 648 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
647 iwl_legacy_get_cmd_string(cmd->hdr.cmd)); 649 iwl_legacy_get_cmd_string(cmd->hdr.cmd));
648 wake_up_interruptible(&priv->wait_command_queue); 650 wake_up(&priv->wait_command_queue);
649 } 651 }
650 652
651 /* Mark as unmapped */ 653 /* Mark as unmapped */
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index 795826a014ed..66ee15629a76 100644
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -841,7 +841,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
841 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 841 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
842 test_bit(STATUS_RF_KILL_HW, &priv->status)); 842 test_bit(STATUS_RF_KILL_HW, &priv->status));
843 else 843 else
844 wake_up_interruptible(&priv->wait_command_queue); 844 wake_up(&priv->wait_command_queue);
845} 845}
846 846
847/** 847/**
@@ -2269,7 +2269,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2269 iwl3945_reg_txpower_periodic(priv); 2269 iwl3945_reg_txpower_periodic(priv);
2270 2270
2271 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 2271 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2272 wake_up_interruptible(&priv->wait_command_queue); 2272 wake_up(&priv->wait_command_queue);
2273 2273
2274 return; 2274 return;
2275 2275
@@ -2300,7 +2300,7 @@ static void __iwl3945_down(struct iwl_priv *priv)
2300 iwl_legacy_clear_driver_stations(priv); 2300 iwl_legacy_clear_driver_stations(priv);
2301 2301
2302 /* Unblock any waiting calls */ 2302 /* Unblock any waiting calls */
2303 wake_up_interruptible_all(&priv->wait_command_queue); 2303 wake_up_all(&priv->wait_command_queue);
2304 2304
2305 /* Wipe out the EXIT_PENDING status bit if we are not actually 2305 /* Wipe out the EXIT_PENDING status bit if we are not actually
2306 * exiting the module */ 2306 * exiting the module */
@@ -2853,7 +2853,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
2853 2853
2854 /* Wait for START_ALIVE from ucode. Otherwise callbacks from 2854 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
2855 * mac80211 will not be run successfully. */ 2855 * mac80211 will not be run successfully. */
2856 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 2856 ret = wait_event_timeout(priv->wait_command_queue,
2857 test_bit(STATUS_READY, &priv->status), 2857 test_bit(STATUS_READY, &priv->status),
2858 UCODE_READY_TIMEOUT); 2858 UCODE_READY_TIMEOUT);
2859 if (!ret) { 2859 if (!ret) {
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index 14334668034e..aa0c2539761e 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -576,7 +576,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
576 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 576 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
577 test_bit(STATUS_RF_KILL_HW, &priv->status)); 577 test_bit(STATUS_RF_KILL_HW, &priv->status));
578 else 578 else
579 wake_up_interruptible(&priv->wait_command_queue); 579 wake_up(&priv->wait_command_queue);
580} 580}
581 581
582/** 582/**
@@ -926,7 +926,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
926 handled |= CSR_INT_BIT_FH_TX; 926 handled |= CSR_INT_BIT_FH_TX;
927 /* Wake up uCode load routine, now that load is complete */ 927 /* Wake up uCode load routine, now that load is complete */
928 priv->ucode_write_complete = 1; 928 priv->ucode_write_complete = 1;
929 wake_up_interruptible(&priv->wait_command_queue); 929 wake_up(&priv->wait_command_queue);
930 } 930 }
931 931
932 if (inta & ~handled) { 932 if (inta & ~handled) {
@@ -1795,7 +1795,7 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
1795 iwl4965_rf_kill_ct_config(priv); 1795 iwl4965_rf_kill_ct_config(priv);
1796 1796
1797 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 1797 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
1798 wake_up_interruptible(&priv->wait_command_queue); 1798 wake_up(&priv->wait_command_queue);
1799 1799
1800 iwl_legacy_power_update_mode(priv, true); 1800 iwl_legacy_power_update_mode(priv, true);
1801 IWL_DEBUG_INFO(priv, "Updated power mode\n"); 1801 IWL_DEBUG_INFO(priv, "Updated power mode\n");
@@ -1828,7 +1828,7 @@ static void __iwl4965_down(struct iwl_priv *priv)
1828 iwl_legacy_clear_driver_stations(priv); 1828 iwl_legacy_clear_driver_stations(priv);
1829 1829
1830 /* Unblock any waiting calls */ 1830 /* Unblock any waiting calls */
1831 wake_up_interruptible_all(&priv->wait_command_queue); 1831 wake_up_all(&priv->wait_command_queue);
1832 1832
1833 /* Wipe out the EXIT_PENDING status bit if we are not actually 1833 /* Wipe out the EXIT_PENDING status bit if we are not actually
1834 * exiting the module */ 1834 * exiting the module */
@@ -2266,7 +2266,7 @@ int iwl4965_mac_start(struct ieee80211_hw *hw)
2266 2266
2267 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from 2267 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
2268 * mac80211 will not be run successfully. */ 2268 * mac80211 will not be run successfully. */
2269 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 2269 ret = wait_event_timeout(priv->wait_command_queue,
2270 test_bit(STATUS_READY, &priv->status), 2270 test_bit(STATUS_READY, &priv->status),
2271 UCODE_READY_TIMEOUT); 2271 UCODE_READY_TIMEOUT);
2272 if (!ret) { 2272 if (!ret) {
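
The iwlegacy series above converts the command and firmware waits from wait_event_interruptible_timeout()/wake_up_interruptible() to the non-interruptible wait_event_timeout()/wake_up() pair, so a signal delivered to the caller cannot abort a wait that only the hardware completion should end. The basic pairing, as a self-contained sketch with hypothetical names:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(foo_wait);       /* hypothetical wait queue */
static bool foo_done;

static int foo_wait_for_completion(void)
{
        long ret;

        /* Sleep until foo_done is set or the timeout elapses; signals are ignored. */
        ret = wait_event_timeout(foo_wait, foo_done, msecs_to_jiffies(2000));
        return ret ? 0 : -ETIMEDOUT;    /* 0 means the condition never became true */
}

static void foo_complete(void)
{
        foo_done = true;
        wake_up(&foo_wait);             /* non-interruptible counterpart of the wait */
}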
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index a895a099d086..56211006a182 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -167,7 +167,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
167 167
168 memset(&cmd, 0, sizeof(cmd)); 168 memset(&cmd, 0, sizeof(cmd));
169 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 169 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
170 memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib)); 170 memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
171 if (!(cmd.radio_sensor_offset)) 171 if (!(cmd.radio_sensor_offset))
172 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; 172 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
173 173
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index b0ae4de7f083..f9c3cd95d614 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2140,7 +2140,12 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
2140 IEEE80211_HW_SPECTRUM_MGMT | 2140 IEEE80211_HW_SPECTRUM_MGMT |
2141 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 2141 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2142 2142
2143 /*
2144 * Including the following line will crash some AP's. This
2145 * workaround removes the stimulus which causes the crash until
2146 * the AP software can be fixed.
2143 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 2147 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2148 */
2144 2149
2145 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 2150 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
2146 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 2151 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index 69d4ec467dca..2fdbffa079c1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -478,27 +478,22 @@ out_no_pci:
478 return err; 478 return err;
479} 479}
480 480
481static void iwl_pci_down(struct iwl_bus *bus)
482{
483 struct iwl_pci_bus *pci_bus = (struct iwl_pci_bus *) bus->bus_specific;
484
485 pci_disable_msi(pci_bus->pci_dev);
486 pci_iounmap(pci_bus->pci_dev, pci_bus->hw_base);
487 pci_release_regions(pci_bus->pci_dev);
488 pci_disable_device(pci_bus->pci_dev);
489 pci_set_drvdata(pci_bus->pci_dev, NULL);
490
491 kfree(bus);
492}
493
494static void __devexit iwl_pci_remove(struct pci_dev *pdev) 481static void __devexit iwl_pci_remove(struct pci_dev *pdev)
495{ 482{
496 struct iwl_priv *priv = pci_get_drvdata(pdev); 483 struct iwl_priv *priv = pci_get_drvdata(pdev);
497 void *bus_specific = priv->bus->bus_specific; 484 struct iwl_bus *bus = priv->bus;
485 struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus);
486 struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
498 487
499 iwl_remove(priv); 488 iwl_remove(priv);
500 489
501 iwl_pci_down(bus_specific); 490 pci_disable_msi(pci_dev);
491 pci_iounmap(pci_dev, pci_bus->hw_base);
492 pci_release_regions(pci_dev);
493 pci_disable_device(pci_dev);
494 pci_set_drvdata(pci_dev, NULL);
495
496 kfree(bus);
502} 497}
503 498
504#ifdef CONFIG_PM 499#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index dd6937e97055..77e528f5db88 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -405,31 +405,33 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
405 405
406 mutex_lock(&priv->mutex); 406 mutex_lock(&priv->mutex);
407 407
408 if (test_bit(STATUS_SCANNING, &priv->status) &&
409 priv->scan_type != IWL_SCAN_NORMAL) {
410 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
411 ret = -EAGAIN;
412 goto out_unlock;
413 }
414
415 /* mac80211 will only ask for one band at a time */
416 priv->scan_request = req;
417 priv->scan_vif = vif;
418
419 /* 408 /*
420 * If an internal scan is in progress, just set 409 * If an internal scan is in progress, just set
421 * up the scan_request as per above. 410 * up the scan_request as per above.
422 */ 411 */
423 if (priv->scan_type != IWL_SCAN_NORMAL) { 412 if (priv->scan_type != IWL_SCAN_NORMAL) {
424 IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n"); 413 IWL_DEBUG_SCAN(priv,
414 "SCAN request during internal scan - defer\n");
415 priv->scan_request = req;
416 priv->scan_vif = vif;
425 ret = 0; 417 ret = 0;
426 } else 418 } else {
419 priv->scan_request = req;
420 priv->scan_vif = vif;
421 /*
422 * mac80211 will only ask for one band at a time
423 * so using channels[0] here is ok
424 */
427 ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, 425 ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
428 req->channels[0]->band); 426 req->channels[0]->band);
427 if (ret) {
428 priv->scan_request = NULL;
429 priv->scan_vif = NULL;
430 }
431 }
429 432
430 IWL_DEBUG_MAC80211(priv, "leave\n"); 433 IWL_DEBUG_MAC80211(priv, "leave\n");
431 434
432out_unlock:
433 mutex_unlock(&priv->mutex); 435 mutex_unlock(&priv->mutex);
434 436
435 return ret; 437 return ret;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
index a6b2b1db0b1d..222d410c586e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -771,6 +771,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
771 cmd = txq->cmd[cmd_index]; 771 cmd = txq->cmd[cmd_index];
772 meta = &txq->meta[cmd_index]; 772 meta = &txq->meta[cmd_index];
773 773
774 txq->time_stamp = jiffies;
775
774 iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); 776 iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
775 777
776 /* Input error checking is done when commands are added to queue. */ 778 /* Input error checking is done when commands are added to queue. */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index ef67f6786a84..0019dfd8fb01 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3697,14 +3697,15 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
3697 rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg); 3697 rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
3698 3698
3699 /* Apparently the data is read from end to start */ 3699 /* Apparently the data is read from end to start */
3700 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, 3700 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
3701 (u32 *)&rt2x00dev->eeprom[i]); 3701 /* The returned value is in CPU order, but eeprom is le */
3702 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, 3702 rt2x00dev->eeprom[i] = cpu_to_le32(reg);
3703 (u32 *)&rt2x00dev->eeprom[i + 2]); 3703 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
3704 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, 3704 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
3705 (u32 *)&rt2x00dev->eeprom[i + 4]); 3705 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
3706 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, 3706 *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
3707 (u32 *)&rt2x00dev->eeprom[i + 6]); 3707 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg);
3708 *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
3708 3709
3709 mutex_unlock(&rt2x00dev->csr_mutex); 3710 mutex_unlock(&rt2x00dev->csr_mutex);
3710} 3711}
@@ -3870,19 +3871,23 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3870 return -ENODEV; 3871 return -ENODEV;
3871 } 3872 }
3872 3873
3873 if (!rt2x00_rf(rt2x00dev, RF2820) && 3874 switch (rt2x00dev->chip.rf) {
3874 !rt2x00_rf(rt2x00dev, RF2850) && 3875 case RF2820:
3875 !rt2x00_rf(rt2x00dev, RF2720) && 3876 case RF2850:
3876 !rt2x00_rf(rt2x00dev, RF2750) && 3877 case RF2720:
3877 !rt2x00_rf(rt2x00dev, RF3020) && 3878 case RF2750:
3878 !rt2x00_rf(rt2x00dev, RF2020) && 3879 case RF3020:
3879 !rt2x00_rf(rt2x00dev, RF3021) && 3880 case RF2020:
3880 !rt2x00_rf(rt2x00dev, RF3022) && 3881 case RF3021:
3881 !rt2x00_rf(rt2x00dev, RF3052) && 3882 case RF3022:
3882 !rt2x00_rf(rt2x00dev, RF3320) && 3883 case RF3052:
3883 !rt2x00_rf(rt2x00dev, RF5370) && 3884 case RF3320:
3884 !rt2x00_rf(rt2x00dev, RF5390)) { 3885 case RF5370:
3885 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 3886 case RF5390:
3887 break;
3888 default:
3889 ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n",
3890 rt2x00dev->chip.rf);
3886 return -ENODEV; 3891 return -ENODEV;
3887 } 3892 }
3888 3893
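
The rt2800_efuse_read() hunk stops casting &rt2x00dev->eeprom[i] directly into the register-read helper and instead reads into a local u32 and stores cpu_to_le32() of it, because the EEPROM shadow is kept little-endian while register reads come back in CPU order. A reduced sketch of storing a CPU-order word into a little-endian array; the array and helper below are placeholders, not the driver's own:

#include <linux/types.h>
#include <asm/byteorder.h>

static __le16 foo_eeprom[8];            /* hypothetical little-endian EEPROM shadow */

static void foo_store_efuse_word(unsigned int i, u32 reg)
{
        /*
         * reg is in CPU byte order; the shadow must stay little-endian on
         * every architecture, so convert explicitly before storing.
         */
        *(__le32 *)&foo_eeprom[i] = cpu_to_le32(reg);
}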
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 507559361d87..dbf501ca317f 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -464,6 +464,15 @@ static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
464 int wcid, ack, pid; 464 int wcid, ack, pid;
465 int tx_wcid, tx_ack, tx_pid; 465 int tx_wcid, tx_ack, tx_pid;
466 466
467 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
468 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) {
469 WARNING(entry->queue->rt2x00dev,
470 "Data pending for entry %u in queue %u\n",
471 entry->entry_idx, entry->queue->qid);
472 cond_resched();
473 return false;
474 }
475
467 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); 476 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
468 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); 477 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
469 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); 478 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
@@ -529,12 +538,11 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
529 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 538 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
530 if (rt2800usb_txdone_entry_check(entry, reg)) 539 if (rt2800usb_txdone_entry_check(entry, reg))
531 break; 540 break;
541 entry = NULL;
532 } 542 }
533 543
534 if (!entry || rt2x00queue_empty(queue)) 544 if (entry)
535 break; 545 rt2800_txdone_entry(entry, reg);
536
537 rt2800_txdone_entry(entry, reg);
538 } 546 }
539} 547}
540 548
@@ -558,8 +566,10 @@ static void rt2800usb_work_txdone(struct work_struct *work)
558 while (!rt2x00queue_empty(queue)) { 566 while (!rt2x00queue_empty(queue)) {
559 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 567 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
560 568
561 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 569 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
570 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
562 break; 571 break;
572
563 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) 573 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
564 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); 574 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
565 else if (rt2x00queue_status_timeout(entry)) 575 else if (rt2x00queue_status_timeout(entry))
@@ -921,6 +931,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
921 { USB_DEVICE(0x07d1, 0x3c16) }, 931 { USB_DEVICE(0x07d1, 0x3c16) },
922 /* Draytek */ 932 /* Draytek */
923 { USB_DEVICE(0x07fa, 0x7712) }, 933 { USB_DEVICE(0x07fa, 0x7712) },
934 /* DVICO */
935 { USB_DEVICE(0x0fe9, 0xb307) },
924 /* Edimax */ 936 /* Edimax */
925 { USB_DEVICE(0x7392, 0x7711) }, 937 { USB_DEVICE(0x7392, 0x7711) },
926 { USB_DEVICE(0x7392, 0x7717) }, 938 { USB_DEVICE(0x7392, 0x7717) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index b6b4542c2460..1e31050dafc9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -262,23 +262,20 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
262 struct queue_entry *entry = (struct queue_entry *)urb->context; 262 struct queue_entry *entry = (struct queue_entry *)urb->context;
263 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 263 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
264 264
265 if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 265 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
266 return; 266 return;
267
268 if (rt2x00dev->ops->lib->tx_dma_done)
269 rt2x00dev->ops->lib->tx_dma_done(entry);
270
271 /*
272 * Report the frame as DMA done
273 */
274 rt2x00lib_dmadone(entry);
275
276 /* 267 /*
277 * Check if the frame was correctly uploaded 268 * Check if the frame was correctly uploaded
278 */ 269 */
279 if (urb->status) 270 if (urb->status)
280 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); 271 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
272 /*
273 * Report the frame as DMA done
274 */
275 rt2x00lib_dmadone(entry);
281 276
277 if (rt2x00dev->ops->lib->tx_dma_done)
278 rt2x00dev->ops->lib->tx_dma_done(entry);
282 /* 279 /*
283 * Schedule the delayed work for reading the TX status 280 * Schedule the delayed work for reading the TX status
284 * from the device. 281 * from the device.
@@ -874,18 +871,8 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
874{ 871{
875 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); 872 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
876 struct rt2x00_dev *rt2x00dev = hw->priv; 873 struct rt2x00_dev *rt2x00dev = hw->priv;
877 int retval;
878
879 retval = rt2x00lib_suspend(rt2x00dev, state);
880 if (retval)
881 return retval;
882 874
883 /* 875 return rt2x00lib_suspend(rt2x00dev, state);
884 * Decrease usbdev refcount.
885 */
886 usb_put_dev(interface_to_usbdev(usb_intf));
887
888 return 0;
889} 876}
890EXPORT_SYMBOL_GPL(rt2x00usb_suspend); 877EXPORT_SYMBOL_GPL(rt2x00usb_suspend);
891 878
@@ -894,8 +881,6 @@ int rt2x00usb_resume(struct usb_interface *usb_intf)
894 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); 881 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
895 struct rt2x00_dev *rt2x00dev = hw->priv; 882 struct rt2x00_dev *rt2x00dev = hw->priv;
896 883
897 usb_get_dev(interface_to_usbdev(usb_intf));
898
899 return rt2x00lib_resume(rt2x00dev); 884 return rt2x00lib_resume(rt2x00dev);
900} 885}
901EXPORT_SYMBOL_GPL(rt2x00usb_resume); 886EXPORT_SYMBOL_GPL(rt2x00usb_resume);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 6a93939f44e8..0baeb894f093 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2420,6 +2420,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2420 /* Buffalo */ 2420 /* Buffalo */
2421 { USB_DEVICE(0x0411, 0x00d8) }, 2421 { USB_DEVICE(0x0411, 0x00d8) },
2422 { USB_DEVICE(0x0411, 0x00d9) }, 2422 { USB_DEVICE(0x0411, 0x00d9) },
2423 { USB_DEVICE(0x0411, 0x00e6) },
2423 { USB_DEVICE(0x0411, 0x00f4) }, 2424 { USB_DEVICE(0x0411, 0x00f4) },
2424 { USB_DEVICE(0x0411, 0x0116) }, 2425 { USB_DEVICE(0x0411, 0x0116) },
2425 { USB_DEVICE(0x0411, 0x0119) }, 2426 { USB_DEVICE(0x0411, 0x0119) },
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 1bdc1aa305c0..04c4e9eb6ee6 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
610 610
611 mac->link_state = MAC80211_NOLINK; 611 mac->link_state = MAC80211_NOLINK;
612 memset(mac->bssid, 0, 6); 612 memset(mac->bssid, 0, 6);
613
614 /* reset sec info */
615 rtl_cam_reset_sec_info(hw);
616
617 rtl_cam_reset_all_entry(hw);
613 mac->vendor = PEER_UNKNOWN; 618 mac->vendor = PEER_UNKNOWN;
614 619
615 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, 620 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
@@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1063 *or clear all entry here. 1068 *or clear all entry here.
1064 */ 1069 */
1065 rtl_cam_delete_one_entry(hw, mac_addr, key_idx); 1070 rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
1071
1072 rtl_cam_reset_sec_info(hw);
1073
1066 break; 1074 break;
1067 default: 1075 default:
1068 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 1076 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 942f7a3969a7..ef63c0df006a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -281,6 +281,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
281 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)}, 281 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
282 /* 8188CE-VAU USB minCard (b/g mode only) */ 282 /* 8188CE-VAU USB minCard (b/g mode only) */
283 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)}, 283 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
284 /* 8188RU in Alfa AWUS036NHR */
285 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
284 /* 8188 Combo for BC4 */ 286 /* 8188 Combo for BC4 */
285 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)}, 287 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
286 288
@@ -303,20 +305,23 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
303 {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ 305 {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
304 /* HP - Lite-On ,8188CUS Slim Combo */ 306 /* HP - Lite-On ,8188CUS Slim Combo */
305 {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)}, 307 {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
308 {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
306 {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/ 309 {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
307 {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/ 310 {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
308 {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/ 311 {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
309 {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/ 312 {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
310 {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/ 313 {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
311 {RTL_USB_DEVICE(0x3358, 0x13d3, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/ 314 {RTL_USB_DEVICE(0x13d3, 0x3358, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
312 /* Russian customer -Azwave (8188CE-VAU b/g mode only) */ 315 /* Russian customer -Azwave (8188CE-VAU b/g mode only) */
313 {RTL_USB_DEVICE(0x3359, 0x13d3, rtl92cu_hal_cfg)}, 316 {RTL_USB_DEVICE(0x13d3, 0x3359, rtl92cu_hal_cfg)},
317 {RTL_USB_DEVICE(0x4855, 0x0090, rtl92cu_hal_cfg)}, /* Feixun */
318 {RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */
319 {RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */
314 320
315 /****** 8192CU ********/ 321 /****** 8192CU ********/
316 {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/ 322 {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
317 {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/ 323 {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
318 {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/ 324 {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
319 {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/
320 {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ 325 {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
321 {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ 326 {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
322 {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ 327 {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 906e7aa55bc3..3e52a5496224 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
549 (tcb_desc->rts_use_shortpreamble ? 1 : 0) 549 (tcb_desc->rts_use_shortpreamble ? 1 : 0)
550 : (tcb_desc->rts_use_shortgi ? 1 : 0))); 550 : (tcb_desc->rts_use_shortgi ? 1 : 0)));
551 if (mac->bw_40) { 551 if (mac->bw_40) {
552 if (tcb_desc->packet_bw) { 552 if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
553 SET_TX_DESC_DATA_BW(txdesc, 1); 553 SET_TX_DESC_DATA_BW(txdesc, 1);
554 SET_TX_DESC_DATA_SC(txdesc, 3); 554 SET_TX_DESC_DATA_SC(txdesc, 3);
555 } else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){
556 SET_TX_DESC_DATA_BW(txdesc, 1);
557 SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);
555 } else { 558 } else {
556 SET_TX_DESC_DATA_BW(txdesc, 0); 559 SET_TX_DESC_DATA_BW(txdesc, 0);
557 if (rate_flag & IEEE80211_TX_RC_DUP_DATA) 560 SET_TX_DESC_DATA_SC(txdesc, 0);
558 SET_TX_DESC_DATA_SC(txdesc, 561 }
559 mac->cur_40_prime_sc);
560 }
561 } else { 562 } else {
562 SET_TX_DESC_DATA_BW(txdesc, 0); 563 SET_TX_DESC_DATA_BW(txdesc, 0);
563 SET_TX_DESC_DATA_SC(txdesc, 0); 564 SET_TX_DESC_DATA_SC(txdesc, 0);
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 8b1cef0ffde6..4bf3cf457ef0 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -863,6 +863,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
863 u8 tid = 0; 863 u8 tid = 0;
864 u16 seq_number = 0; 864 u16 seq_number = 0;
865 865
866 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
866 if (ieee80211_is_auth(fc)) { 867 if (ieee80211_is_auth(fc)) {
867 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); 868 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
868 rtl_ips_nic_on(hw); 869 rtl_ips_nic_on(hw);
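
The rtlwifi usb.c fix zeroes the on-stack TX control-block descriptor before it is filled in, so fields the TX path never writes are not left as stack garbage when the descriptor is consulted later. The idiom, sketched with a hypothetical descriptor type:

#include <linux/types.h>
#include <linux/string.h>

struct foo_tx_desc {                    /* hypothetical stack-allocated descriptor */
        u8 tid;
        u16 seq;
        bool use_shortgi;
};

static void foo_fill_desc(struct foo_tx_desc *out)
{
        struct foo_tx_desc tcb_desc;

        memset(&tcb_desc, 0, sizeof(tcb_desc));  /* no stale stack contents */
        tcb_desc.tid = 0;                        /* set only what this path knows */
        *out = tcb_desc;
}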
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/wl1251/acx.c
index ef8370edace7..ad87a1ac6462 100644
--- a/drivers/net/wireless/wl1251/acx.c
+++ b/drivers/net/wireless/wl1251/acx.c
@@ -140,8 +140,6 @@ int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth)
140 auth->sleep_auth = sleep_auth; 140 auth->sleep_auth = sleep_auth;
141 141
142 ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); 142 ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
143 if (ret < 0)
144 return ret;
145 143
146out: 144out:
147 kfree(auth); 145 kfree(auth);
@@ -681,10 +679,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl)
681 679
682 ret = wl1251_cmd_configure(wl, ACX_CCA_THRESHOLD, 680 ret = wl1251_cmd_configure(wl, ACX_CCA_THRESHOLD,
683 detection, sizeof(*detection)); 681 detection, sizeof(*detection));
684 if (ret < 0) { 682 if (ret < 0)
685 wl1251_warning("failed to set cca threshold: %d", ret); 683 wl1251_warning("failed to set cca threshold: %d", ret);
686 return ret;
687 }
688 684
689out: 685out:
690 kfree(detection); 686 kfree(detection);
diff --git a/drivers/net/wireless/wl1251/cmd.c b/drivers/net/wireless/wl1251/cmd.c
index 81f164bc4888..d14d69d733a0 100644
--- a/drivers/net/wireless/wl1251/cmd.c
+++ b/drivers/net/wireless/wl1251/cmd.c
@@ -241,7 +241,7 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
241 if (ret < 0) { 241 if (ret < 0) {
242 wl1251_error("tx %s cmd for channel %d failed", 242 wl1251_error("tx %s cmd for channel %d failed",
243 enable ? "start" : "stop", channel); 243 enable ? "start" : "stop", channel);
244 return ret; 244 goto out;
245 } 245 }
246 246
247 wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d", 247 wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index 7e33f1f4f3d4..34f6ab53e519 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -77,8 +77,6 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
77 auth->sleep_auth = sleep_auth; 77 auth->sleep_auth = sleep_auth;
78 78
79 ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); 79 ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
80 if (ret < 0)
81 return ret;
82 80
83out: 81out:
84 kfree(auth); 82 kfree(auth);
@@ -624,10 +622,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl)
624 622
625 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, 623 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD,
626 detection, sizeof(*detection)); 624 detection, sizeof(*detection));
627 if (ret < 0) { 625 if (ret < 0)
628 wl1271_warning("failed to set cca threshold: %d", ret); 626 wl1271_warning("failed to set cca threshold: %d", ret);
629 return ret;
630 }
631 627
632out: 628out:
633 kfree(detection); 629 kfree(detection);
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index e58c22d21e39..b70ae40ad660 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -4283,6 +4283,7 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
4283 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 4283 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
4284 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP); 4284 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
4285 wl->hw->wiphy->max_scan_ssids = 1; 4285 wl->hw->wiphy->max_scan_ssids = 1;
4286 wl->hw->wiphy->max_sched_scan_ssids = 1;
4286 /* 4287 /*
4287 * Maximum length of elements in scanning probe request templates 4288 * Maximum length of elements in scanning probe request templates
4288 * should be the maximum length possible for a template, without 4289 * should be the maximum length possible for a template, without
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 5cf18c2c23f0..fb1fd5af75ea 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -164,7 +164,7 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
164 /* If enabled, tell runtime PM not to power off the card */ 164 /* If enabled, tell runtime PM not to power off the card */
165 if (pm_runtime_enabled(&func->dev)) { 165 if (pm_runtime_enabled(&func->dev)) {
166 ret = pm_runtime_get_sync(&func->dev); 166 ret = pm_runtime_get_sync(&func->dev);
167 if (ret) 167 if (ret < 0)
168 goto out; 168 goto out;
169 } else { 169 } else {
170 /* Runtime PM is disabled: power up the card manually */ 170 /* Runtime PM is disabled: power up the card manually */
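pm_runtime_get_sync() can return a positive value on success (for example when the device was already resumed), so the old `if (ret)` would bail out on a perfectly good power-up; only negative returns indicate failure. A minimal sketch of the corrected check, with a stub modelling the runtime-PM call rather than the kernel API itself:

#include <stdio.h>

/* Stub modelling pm_runtime_get_sync(): >= 0 means success, < 0 an error. */
static int fake_runtime_get_sync(void) { return 1; /* already resumed */ }

static int power_on(void)
{
    int ret = fake_runtime_get_sync();

    if (ret < 0)                /* only negative returns are failures */
        return ret;

    /* proceed with powered-up hardware */
    return 0;
}

int main(void)
{
    printf("power_on() = %d\n", power_on());   /* 0, not an error */
    return 0;
}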
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 5d5e1ef87206..4ae8effaee22 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -36,7 +36,6 @@ enum wl1271_tm_commands {
36 WL1271_TM_CMD_TEST, 36 WL1271_TM_CMD_TEST,
37 WL1271_TM_CMD_INTERROGATE, 37 WL1271_TM_CMD_INTERROGATE,
38 WL1271_TM_CMD_CONFIGURE, 38 WL1271_TM_CMD_CONFIGURE,
39 WL1271_TM_CMD_NVS_PUSH,
40 WL1271_TM_CMD_SET_PLT_MODE, 39 WL1271_TM_CMD_SET_PLT_MODE,
41 WL1271_TM_CMD_RECOVER, 40 WL1271_TM_CMD_RECOVER,
42 41
@@ -139,12 +138,15 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
139 138
140 if (ret < 0) { 139 if (ret < 0) {
141 wl1271_warning("testmode cmd interrogate failed: %d", ret); 140 wl1271_warning("testmode cmd interrogate failed: %d", ret);
141 kfree(cmd);
142 return ret; 142 return ret;
143 } 143 }
144 144
145 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); 145 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
146 if (!skb) 146 if (!skb) {
147 kfree(cmd);
147 return -ENOMEM; 148 return -ENOMEM;
149 }
148 150
149 NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); 151 NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
150 152
@@ -187,48 +189,6 @@ static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
187 return 0; 189 return 0;
188} 190}
189 191
190static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
191{
192 int ret = 0;
193 size_t len;
194 void *buf;
195
196 wl1271_debug(DEBUG_TESTMODE, "testmode cmd nvs push");
197
198 if (!tb[WL1271_TM_ATTR_DATA])
199 return -EINVAL;
200
201 buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
202 len = nla_len(tb[WL1271_TM_ATTR_DATA]);
203
204 mutex_lock(&wl->mutex);
205
206 kfree(wl->nvs);
207
208 if ((wl->chip.id == CHIP_ID_1283_PG20) &&
209 (len != sizeof(struct wl128x_nvs_file)))
210 return -EINVAL;
211 else if (len != sizeof(struct wl1271_nvs_file))
212 return -EINVAL;
213
214 wl->nvs = kzalloc(len, GFP_KERNEL);
215 if (!wl->nvs) {
216 wl1271_error("could not allocate memory for the nvs file");
217 ret = -ENOMEM;
218 goto out;
219 }
220
221 memcpy(wl->nvs, buf, len);
222 wl->nvs_len = len;
223
224 wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs");
225
226out:
227 mutex_unlock(&wl->mutex);
228
229 return ret;
230}
231
232static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[]) 192static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
233{ 193{
234 u32 val; 194 u32 val;
@@ -285,8 +245,6 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
285 return wl1271_tm_cmd_interrogate(wl, tb); 245 return wl1271_tm_cmd_interrogate(wl, tb);
286 case WL1271_TM_CMD_CONFIGURE: 246 case WL1271_TM_CMD_CONFIGURE:
287 return wl1271_tm_cmd_configure(wl, tb); 247 return wl1271_tm_cmd_configure(wl, tb);
288 case WL1271_TM_CMD_NVS_PUSH:
289 return wl1271_tm_cmd_nvs_push(wl, tb);
290 case WL1271_TM_CMD_SET_PLT_MODE: 248 case WL1271_TM_CMD_SET_PLT_MODE:
291 return wl1271_tm_cmd_set_plt_mode(wl, tb); 249 return wl1271_tm_cmd_set_plt_mode(wl, tb);
292 case WL1271_TM_CMD_RECOVER: 250 case WL1271_TM_CMD_RECOVER:
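The interrogate hunk plugs two leaks of the same `cmd` buffer: it must be freed both when the firmware read fails and when the reply skb cannot be allocated, not only on the success path (the NVS-push command is removed outright, so its handler and switch case go with it). A minimal sketch of freeing one allocation on every exit path, with hypothetical read_fw()/alloc_reply() helpers in place of the driver calls:

#include <errno.h>
#include <stdlib.h>

static int read_fw(void *cmd) { (void)cmd; return 0; }
static void *alloc_reply(void) { return malloc(16); }

static int interrogate(void)
{
    void *reply;
    void *cmd = calloc(1, 64);
    int ret;

    if (!cmd)
        return -ENOMEM;

    ret = read_fw(cmd);
    if (ret < 0) {
        free(cmd);              /* error path 1: firmware read failed */
        return ret;
    }

    reply = alloc_reply();
    if (!reply) {
        free(cmd);              /* error path 2: no memory for the reply */
        return -ENOMEM;
    }

    /* ... copy cmd into reply and send it ... */
    free(reply);
    free(cmd);                  /* success path */
    return 0;
}

int main(void)
{
    return interrogate() ? 1 : 0;
}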
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 0ca86f9ec4ed..182562952c79 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
327 xenvif_get(vif); 327 xenvif_get(vif);
328 328
329 rtnl_lock(); 329 rtnl_lock();
330 if (netif_running(vif->dev))
331 xenvif_up(vif);
332 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) 330 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
333 dev_set_mtu(vif->dev, ETH_DATA_LEN); 331 dev_set_mtu(vif->dev, ETH_DATA_LEN);
334 netdev_update_features(vif->dev); 332 netdev_update_features(vif->dev);
335 netif_carrier_on(vif->dev); 333 netif_carrier_on(vif->dev);
334 if (netif_running(vif->dev))
335 xenvif_up(vif);
336 rtnl_unlock(); 336 rtnl_unlock();
337 337
338 return 0; 338 return 0;
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index 749fdf070319..3ffd9c1acc0a 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -158,47 +158,6 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
158 */ 158 */
159} 159}
160 160
161/* Program PCIE MaxPayload setting on device: ensure parent maxpayload <= device */
162static int pci_set_payload(struct pci_dev *dev)
163{
164 int pos, ppos;
165 u16 pctl, psz;
166 u16 dctl, dsz, dcap, dmax;
167 struct pci_dev *parent;
168
169 parent = dev->bus->self;
170 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
171 if (!pos)
172 return 0;
173
174 /* Read Device MaxPayload capability and setting */
175 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &dctl);
176 pci_read_config_word(dev, pos + PCI_EXP_DEVCAP, &dcap);
177 dsz = (dctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
178 dmax = (dcap & PCI_EXP_DEVCAP_PAYLOAD);
179
180 /* Read Parent MaxPayload setting */
181 ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
182 if (!ppos)
183 return 0;
184 pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
185 psz = (pctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
186
187 /* If parent payload > device max payload -> error
188 * If parent payload > device payload -> set speed
189 * If parent payload <= device payload -> do nothing
190 */
191 if (psz > dmax)
192 return -1;
193 else if (psz > dsz) {
194 dev_info(&dev->dev, "Setting MaxPayload to %d\n", 128 << psz);
195 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
196 (dctl & ~PCI_EXP_DEVCTL_PAYLOAD) +
197 (psz << 5));
198 }
199 return 0;
200}
201
202void pci_configure_slot(struct pci_dev *dev) 161void pci_configure_slot(struct pci_dev *dev)
203{ 162{
204 struct pci_dev *cdev; 163 struct pci_dev *cdev;
@@ -210,9 +169,9 @@ void pci_configure_slot(struct pci_dev *dev)
210 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) 169 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
211 return; 170 return;
212 171
213 ret = pci_set_payload(dev); 172 if (dev->bus && dev->bus->self)
214 if (ret) 173 pcie_bus_configure_settings(dev->bus,
215 dev_warn(&dev->dev, "could not set device max payload\n"); 174 dev->bus->self->pcie_mpss);
216 175
217 memset(&hpp, 0, sizeof(hpp)); 176 memset(&hpp, 0, sizeof(hpp));
218 ret = pci_get_hp_params(dev, &hpp); 177 ret = pci_get_hp_params(dev, &hpp);
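With the open-coded pci_set_payload() gone, the hotplug path defers to the generic pcie_bus_configure_settings(), handing it the bridge's pcie_mpss — the Max_Payload_Size Supported field read from Device Capabilities later in this series, where an encoded value v means 128 << v bytes. A minimal sketch of that encoding, since it recurs throughout the remaining hunks:

#include <stdio.h>

/* DEVCAP/DEVCTL payload encoding: 0 -> 128 bytes, 1 -> 256, ... 5 -> 4096. */
static int mpss_to_bytes(unsigned int mpss) { return 128 << mpss; }

int main(void)
{
    for (unsigned int v = 0; v <= 5; v++)
        printf("encoded %u -> %d bytes\n", v, mpss_to_bytes(v));
    return 0;
}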
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index c94d37ec55c8..f0929934bb7a 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -55,7 +55,7 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
55 */ 55 */
56 if (bus->bridge->of_node) 56 if (bus->bridge->of_node)
57 return of_node_get(bus->bridge->of_node); 57 return of_node_get(bus->bridge->of_node);
58 if (bus->bridge->parent->of_node) 58 if (bus->bridge->parent && bus->bridge->parent->of_node)
59 return of_node_get(bus->bridge->parent->of_node); 59 return of_node_get(bus->bridge->parent->of_node);
60 return NULL; 60 return NULL;
61} 61}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 08a95b369d85..e9651f0a8817 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -77,6 +77,8 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
77unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; 77unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; 78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
79 79
80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
81
80/* 82/*
81 * The default CLS is used if arch didn't set CLS explicitly and not 83 * The default CLS is used if arch didn't set CLS explicitly and not
82 * all pci devices agree on the same value. Arch can override either 84 * all pci devices agree on the same value. Arch can override either
@@ -3223,6 +3225,67 @@ out:
3223EXPORT_SYMBOL(pcie_set_readrq); 3225EXPORT_SYMBOL(pcie_set_readrq);
3224 3226
3225/** 3227/**
3228 * pcie_get_mps - get PCI Express maximum payload size
3229 * @dev: PCI device to query
3230 *
3231 * Returns maximum payload size in bytes
3232 * or appropriate error value.
3233 */
3234int pcie_get_mps(struct pci_dev *dev)
3235{
3236 int ret, cap;
3237 u16 ctl;
3238
3239 cap = pci_pcie_cap(dev);
3240 if (!cap)
3241 return -EINVAL;
3242
3243 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3244 if (!ret)
3245 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3246
3247 return ret;
3248}
3249
3250/**
3251 * pcie_set_mps - set PCI Express maximum payload size
3252 * @dev: PCI device to query
3253 * @mps: maximum payload size in bytes
3254 * valid values are 128, 256, 512, 1024, 2048, 4096
3255 *
3256 * If possible sets maximum payload size
3257 */
3258int pcie_set_mps(struct pci_dev *dev, int mps)
3259{
3260 int cap, err = -EINVAL;
3261 u16 ctl, v;
3262
3263 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3264 goto out;
3265
3266 v = ffs(mps) - 8;
3267 if (v > dev->pcie_mpss)
3268 goto out;
3269 v <<= 5;
3270
3271 cap = pci_pcie_cap(dev);
3272 if (!cap)
3273 goto out;
3274
3275 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3276 if (err)
3277 goto out;
3278
3279 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3280 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3281 ctl |= v;
3282 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3283 }
3284out:
3285 return err;
3286}
3287
3288/**
3226 * pci_select_bars - Make BAR mask from the type of resource 3289 * pci_select_bars - Make BAR mask from the type of resource
3227 * @dev: the PCI device for which BAR mask is made 3290 * @dev: the PCI device for which BAR mask is made
3228 * @flags: resource type mask to be selected 3291 * @flags: resource type mask to be selected
@@ -3505,6 +3568,14 @@ static int __init pci_setup(char *str)
3505 pci_hotplug_io_size = memparse(str + 9, &str); 3568 pci_hotplug_io_size = memparse(str + 9, &str);
3506 } else if (!strncmp(str, "hpmemsize=", 10)) { 3569 } else if (!strncmp(str, "hpmemsize=", 10)) {
3507 pci_hotplug_mem_size = memparse(str + 10, &str); 3570 pci_hotplug_mem_size = memparse(str + 10, &str);
3571 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3572 pcie_bus_config = PCIE_BUS_TUNE_OFF;
3573 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
3574 pcie_bus_config = PCIE_BUS_SAFE;
3575 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
3576 pcie_bus_config = PCIE_BUS_PERFORMANCE;
3577 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3578 pcie_bus_config = PCIE_BUS_PEER2PEER;
3508 } else { 3579 } else {
3509 printk(KERN_ERR "PCI: Unknown option `%s'\n", 3580 printk(KERN_ERR "PCI: Unknown option `%s'\n",
3510 str); 3581 str);
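pcie_get_mps()/pcie_set_mps() translate between bytes and the 3-bit DEVCTL payload field: reading is 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5), writing computes ffs(mps) - 8 and shifts it into bits 7:5, rejecting any value that is not a power of two in 128..4096 or that exceeds the device's pcie_mpss. The new parameters let the policy be chosen on the kernel command line, e.g. pci=pcie_bus_safe, with tune-off as the compiled-in default. A standalone sketch of the byte/field conversion only — a user-space model, not the kernel helpers themselves:

#include <stdio.h>

/* Model of the DEVCTL Max_Payload_Size field conversion used above. */
static int mps_field_to_bytes(unsigned int field) { return 128 << field; }

static int bytes_to_mps_field(int mps)
{
    /* valid values: powers of two from 128 to 4096 */
    if (mps < 128 || mps > 4096 || (mps & (mps - 1)))
        return -1;
    return __builtin_ffs(mps) - 8;      /* 128 -> 0, 256 -> 1, ... 4096 -> 5 */
}

int main(void)
{
    printf("256 bytes -> field %d\n", bytes_to_mps_field(256));   /* 1 */
    printf("field 3   -> %d bytes\n", mps_field_to_bytes(3));     /* 1024 */
    return 0;
}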
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index c8cee764b0de..b74084e9ca12 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -283,6 +283,8 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
283 283
284#endif /* CONFIG_PCI_IOV */ 284#endif /* CONFIG_PCI_IOV */
285 285
286extern unsigned long pci_cardbus_resource_alignment(struct resource *);
287
286static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, 288static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
287 struct resource *res) 289 struct resource *res)
288{ 290{
@@ -292,6 +294,8 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
292 if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) 294 if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
293 return pci_sriov_resource_alignment(dev, resno); 295 return pci_sriov_resource_alignment(dev, resno);
294#endif 296#endif
297 if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS)
298 return pci_cardbus_resource_alignment(res);
295 return resource_alignment(res); 299 return resource_alignment(res);
296} 300}
297 301
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 795c9026d55f..6ab6bd3df4b2 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -856,6 +856,8 @@ void set_pcie_port_type(struct pci_dev *pdev)
856 pdev->pcie_cap = pos; 856 pdev->pcie_cap = pos;
857 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); 857 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
858 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; 858 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
859 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
860 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
859} 861}
860 862
861void set_pcie_hotplug_bridge(struct pci_dev *pdev) 863void set_pcie_hotplug_bridge(struct pci_dev *pdev)
@@ -1326,6 +1328,163 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
1326 return nr; 1328 return nr;
1327} 1329}
1328 1330
1331static int pcie_find_smpss(struct pci_dev *dev, void *data)
1332{
1333 u8 *smpss = data;
1334
1335 if (!pci_is_pcie(dev))
1336 return 0;
1337
1338 /* For PCIE hotplug enabled slots not connected directly to a
1339 * PCI-E root port, there can be problems when hotplugging
1340 * devices. This is due to the possibility of hotplugging a
 1341 * device into the fabric with a smaller MPS than the devices
1342 * currently running have configured. Modifying the MPS on the
1343 * running devices could cause a fatal bus error due to an
1344 * incoming frame being larger than the newly configured MPS.
1345 * To work around this, the MPS for the entire fabric must be
1346 * set to the minimum size. Any devices hotplugged into this
1347 * fabric will have the minimum MPS set. If the PCI hotplug
 1348 * slot is directly connected to the root port and there are no
1349 * other devices on the fabric (which seems to be the most
1350 * common case), then this is not an issue and MPS discovery
1351 * will occur as normal.
1352 */
1353 if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
1354 (dev->bus->self &&
1355 dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)))
1356 *smpss = 0;
1357
1358 if (*smpss > dev->pcie_mpss)
1359 *smpss = dev->pcie_mpss;
1360
1361 return 0;
1362}
1363
1364static void pcie_write_mps(struct pci_dev *dev, int mps)
1365{
1366 int rc, dev_mpss;
1367
1368 dev_mpss = 128 << dev->pcie_mpss;
1369
1370 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1371 if (dev->bus->self) {
1372 dev_dbg(&dev->bus->dev, "Bus MPSS %d\n",
1373 128 << dev->bus->self->pcie_mpss);
1374
1375 /* For "MPS Force Max", the assumption is made that
1376 * downstream communication will never be larger than
1377 * the MRRS. So, the MPS only needs to be configured
1378 * for the upstream communication. This being the case,
1379 * walk from the top down and set the MPS of the child
1380 * to that of the parent bus.
1381 */
1382 mps = 128 << dev->bus->self->pcie_mpss;
1383 if (mps > dev_mpss)
1384 dev_warn(&dev->dev, "MPS configured higher than"
1385 " maximum supported by the device. If"
1386 " a bus issue occurs, try running with"
1387 " pci=pcie_bus_safe.\n");
1388 }
1389
1390 dev->pcie_mpss = ffs(mps) - 8;
1391 }
1392
1393 rc = pcie_set_mps(dev, mps);
1394 if (rc)
1395 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1396}
1397
1398static void pcie_write_mrrs(struct pci_dev *dev, int mps)
1399{
1400 int rc, mrrs, dev_mpss;
1401
1402 /* In the "safe" case, do not configure the MRRS. There appear to be
1403 * issues with setting MRRS to 0 on a number of devices.
1404 */
1405
1406 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1407 return;
1408
1409 dev_mpss = 128 << dev->pcie_mpss;
1410
1411 /* For Max performance, the MRRS must be set to the largest supported
1412 * value. However, it cannot be configured larger than the MPS the
1413 * device or the bus can support. This assumes that the largest MRRS
1414 * available on the device cannot be smaller than the device MPSS.
1415 */
1416 mrrs = min(mps, dev_mpss);
1417
1418 /* MRRS is a R/W register. Invalid values can be written, but a
1419 * subsequent read will verify if the value is acceptable or not.
1420 * If the MRRS value provided is not acceptable (e.g., too large),
1421 * shrink the value until it is acceptable to the HW.
1422 */
1423 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1424 dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value"
1425 " to %d. If any issues are encountered, please try "
1426 "running with pci=pcie_bus_safe\n", mrrs);
1427 rc = pcie_set_readrq(dev, mrrs);
1428 if (rc)
1429 dev_err(&dev->dev,
1430 "Failed attempting to set the MRRS\n");
1431
1432 mrrs /= 2;
1433 }
1434}
1435
1436static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1437{
1438 int mps = 128 << *(u8 *)data;
1439
1440 if (!pci_is_pcie(dev))
1441 return 0;
1442
1443 dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
1444 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
1445
1446 pcie_write_mps(dev, mps);
1447 pcie_write_mrrs(dev, mps);
1448
1449 dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
1450 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
1451
1452 return 0;
1453}
1454
1455/* pcie_bus_configure_mps requires that pci_walk_bus work in a top-down,
1456 * parents then children fashion. If this changes, then this code will not
1457 * work as designed.
1458 */
1459void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
1460{
1461 u8 smpss;
1462
1463 if (!pci_is_pcie(bus->self))
1464 return;
1465
1466 if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
1467 return;
1468
1469 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
 1470 * to be aware of the MPS of the destination. To work around this,
1471 * simply force the MPS of the entire system to the smallest possible.
1472 */
1473 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1474 smpss = 0;
1475
1476 if (pcie_bus_config == PCIE_BUS_SAFE) {
1477 smpss = mpss;
1478
1479 pcie_find_smpss(bus->self, &smpss);
1480 pci_walk_bus(bus, pcie_find_smpss, &smpss);
1481 }
1482
1483 pcie_bus_configure_set(bus->self, &smpss);
1484 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1485}
1486EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1487
1329unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) 1488unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1330{ 1489{
1331 unsigned int devfn, pass, max = bus->secondary; 1490 unsigned int devfn, pass, max = bus->secondary;
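pcie_bus_configure_settings() works in two passes: for the "safe" policy it derives the smallest MPSS on the fabric via pcie_find_smpss() (which also forces the minimum when a hotplug bridge is not alone directly below a root port), the peer-to-peer policy forces the minimum outright, and the result is then applied top-down by pcie_bus_configure_set() through pcie_write_mps()/pcie_write_mrrs(). A minimal user-space model of that "find the minimum, then apply it" walk, with a plain array standing in for pci_walk_bus():

#include <stdio.h>

struct fake_dev { const char *name; unsigned char mpss; };

/* Model of pcie_find_smpss(): clamp the running minimum. */
static void find_smpss(const struct fake_dev *dev, unsigned char *smpss)
{
    if (*smpss > dev->mpss)
        *smpss = dev->mpss;
}

/* Model of pcie_bus_configure_set(): program 128 << smpss on each device. */
static void configure_set(const struct fake_dev *dev, unsigned char smpss)
{
    printf("%s: set MPS to %d bytes\n", dev->name, 128 << smpss);
}

int main(void)
{
    struct fake_dev bus[] = { {"root port", 2}, {"switch", 5}, {"nic", 1} };
    unsigned char smpss = 5;            /* start from the policy maximum */

    for (unsigned int i = 0; i < 3; i++)    /* stand-in for pci_walk_bus() */
        find_smpss(&bus[i], &smpss);
    for (unsigned int i = 0; i < 3; i++)
        configure_set(&bus[i], smpss);      /* everyone ends up at 256 bytes */
    return 0;
}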
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8a1d3c7863a8..784da9d36029 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -34,6 +34,7 @@ struct resource_list_x {
34 resource_size_t start; 34 resource_size_t start;
35 resource_size_t end; 35 resource_size_t end;
36 resource_size_t add_size; 36 resource_size_t add_size;
37 resource_size_t min_align;
37 unsigned long flags; 38 unsigned long flags;
38}; 39};
39 40
@@ -65,7 +66,7 @@ void pci_realloc(void)
65 */ 66 */
66static void add_to_list(struct resource_list_x *head, 67static void add_to_list(struct resource_list_x *head,
67 struct pci_dev *dev, struct resource *res, 68 struct pci_dev *dev, struct resource *res,
68 resource_size_t add_size) 69 resource_size_t add_size, resource_size_t min_align)
69{ 70{
70 struct resource_list_x *list = head; 71 struct resource_list_x *list = head;
71 struct resource_list_x *ln = list->next; 72 struct resource_list_x *ln = list->next;
@@ -84,13 +85,16 @@ static void add_to_list(struct resource_list_x *head,
84 tmp->end = res->end; 85 tmp->end = res->end;
85 tmp->flags = res->flags; 86 tmp->flags = res->flags;
86 tmp->add_size = add_size; 87 tmp->add_size = add_size;
88 tmp->min_align = min_align;
87 list->next = tmp; 89 list->next = tmp;
88} 90}
89 91
90static void add_to_failed_list(struct resource_list_x *head, 92static void add_to_failed_list(struct resource_list_x *head,
91 struct pci_dev *dev, struct resource *res) 93 struct pci_dev *dev, struct resource *res)
92{ 94{
93 add_to_list(head, dev, res, 0); 95 add_to_list(head, dev, res,
96 0 /* dont care */,
97 0 /* dont care */);
94} 98}
95 99
96static void __dev_sort_resources(struct pci_dev *dev, 100static void __dev_sort_resources(struct pci_dev *dev,
@@ -121,18 +125,18 @@ static inline void reset_resource(struct resource *res)
121} 125}
122 126
123/** 127/**
124 * adjust_resources_sorted() - satisfy any additional resource requests 128 * reassign_resources_sorted() - satisfy any additional resource requests
125 * 129 *
126 * @add_head : head of the list tracking requests requiring additional 130 * @realloc_head : head of the list tracking requests requiring additional
127 * resources 131 * resources
128 * @head : head of the list tracking requests with allocated 132 * @head : head of the list tracking requests with allocated
129 * resources 133 * resources
130 * 134 *
131 * Walk through each element of the add_head and try to procure 135 * Walk through each element of the realloc_head and try to procure
132 * additional resources for the element, provided the element 136 * additional resources for the element, provided the element
133 * is in the head list. 137 * is in the head list.
134 */ 138 */
135static void adjust_resources_sorted(struct resource_list_x *add_head, 139static void reassign_resources_sorted(struct resource_list_x *realloc_head,
136 struct resource_list *head) 140 struct resource_list *head)
137{ 141{
138 struct resource *res; 142 struct resource *res;
@@ -141,8 +145,8 @@ static void adjust_resources_sorted(struct resource_list_x *add_head,
141 resource_size_t add_size; 145 resource_size_t add_size;
142 int idx; 146 int idx;
143 147
144 prev = add_head; 148 prev = realloc_head;
145 for (list = add_head->next; list;) { 149 for (list = realloc_head->next; list;) {
146 res = list->res; 150 res = list->res;
147 /* skip resource that has been reset */ 151 /* skip resource that has been reset */
148 if (!res->flags) 152 if (!res->flags)
@@ -159,13 +163,17 @@ static void adjust_resources_sorted(struct resource_list_x *add_head,
159 163
160 idx = res - &list->dev->resource[0]; 164 idx = res - &list->dev->resource[0];
161 add_size=list->add_size; 165 add_size=list->add_size;
162 if (!resource_size(res) && add_size) { 166 if (!resource_size(res)) {
163 res->end = res->start + add_size - 1; 167 res->start = list->start;
164 if(pci_assign_resource(list->dev, idx)) 168 res->end = res->start + add_size - 1;
169 if(pci_assign_resource(list->dev, idx))
165 reset_resource(res); 170 reset_resource(res);
166 } else if (add_size) { 171 } else {
167 adjust_resource(res, res->start, 172 resource_size_t align = list->min_align;
168 resource_size(res) + add_size); 173 res->flags |= list->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
174 if (pci_reassign_resource(list->dev, idx, add_size, align))
175 dev_printk(KERN_DEBUG, &list->dev->dev, "failed to add optional resources res=%pR\n",
176 res);
169 } 177 }
170out: 178out:
171 tmp = list; 179 tmp = list;
@@ -210,16 +218,16 @@ static void assign_requested_resources_sorted(struct resource_list *head,
210} 218}
211 219
212static void __assign_resources_sorted(struct resource_list *head, 220static void __assign_resources_sorted(struct resource_list *head,
213 struct resource_list_x *add_head, 221 struct resource_list_x *realloc_head,
214 struct resource_list_x *fail_head) 222 struct resource_list_x *fail_head)
215{ 223{
216 /* Satisfy the must-have resource requests */ 224 /* Satisfy the must-have resource requests */
217 assign_requested_resources_sorted(head, fail_head); 225 assign_requested_resources_sorted(head, fail_head);
218 226
219 /* Try to satisfy any additional nice-to-have resource 227 /* Try to satisfy any additional optional resource
220 requests */ 228 requests */
221 if (add_head) 229 if (realloc_head)
222 adjust_resources_sorted(add_head, head); 230 reassign_resources_sorted(realloc_head, head);
223 free_list(resource_list, head); 231 free_list(resource_list, head);
224} 232}
225 233
@@ -235,7 +243,7 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev,
235} 243}
236 244
237static void pbus_assign_resources_sorted(const struct pci_bus *bus, 245static void pbus_assign_resources_sorted(const struct pci_bus *bus,
238 struct resource_list_x *add_head, 246 struct resource_list_x *realloc_head,
239 struct resource_list_x *fail_head) 247 struct resource_list_x *fail_head)
240{ 248{
241 struct pci_dev *dev; 249 struct pci_dev *dev;
@@ -245,7 +253,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus,
245 list_for_each_entry(dev, &bus->devices, bus_list) 253 list_for_each_entry(dev, &bus->devices, bus_list)
246 __dev_sort_resources(dev, &head); 254 __dev_sort_resources(dev, &head);
247 255
248 __assign_resources_sorted(&head, add_head, fail_head); 256 __assign_resources_sorted(&head, realloc_head, fail_head);
249} 257}
250 258
251void pci_setup_cardbus(struct pci_bus *bus) 259void pci_setup_cardbus(struct pci_bus *bus)
@@ -540,13 +548,27 @@ static resource_size_t calculate_memsize(resource_size_t size,
540 return size; 548 return size;
541} 549}
542 550
551static resource_size_t get_res_add_size(struct resource_list_x *realloc_head,
552 struct resource *res)
553{
554 struct resource_list_x *list;
555
556 /* check if it is in realloc_head list */
557 for (list = realloc_head->next; list && list->res != res;
558 list = list->next);
559 if (list)
560 return list->add_size;
561
562 return 0;
563}
564
543/** 565/**
544 * pbus_size_io() - size the io window of a given bus 566 * pbus_size_io() - size the io window of a given bus
545 * 567 *
546 * @bus : the bus 568 * @bus : the bus
 547 * @min_size : the minimum io window that must be allocated 569 * @min_size : the minimum io window that must be allocated
548 * @add_size : additional optional io window 570 * @add_size : additional optional io window
549 * @add_head : track the additional io window on this list 571 * @realloc_head : track the additional io window on this list
550 * 572 *
551 * Sizing the IO windows of the PCI-PCI bridge is trivial, 573 * Sizing the IO windows of the PCI-PCI bridge is trivial,
552 * since these windows have 4K granularity and the IO ranges 574 * since these windows have 4K granularity and the IO ranges
@@ -554,11 +576,12 @@ static resource_size_t calculate_memsize(resource_size_t size,
554 * We must be careful with the ISA aliasing though. 576 * We must be careful with the ISA aliasing though.
555 */ 577 */
556static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, 578static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
557 resource_size_t add_size, struct resource_list_x *add_head) 579 resource_size_t add_size, struct resource_list_x *realloc_head)
558{ 580{
559 struct pci_dev *dev; 581 struct pci_dev *dev;
560 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); 582 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
561 unsigned long size = 0, size0 = 0, size1 = 0; 583 unsigned long size = 0, size0 = 0, size1 = 0;
584 resource_size_t children_add_size = 0;
562 585
563 if (!b_res) 586 if (!b_res)
564 return; 587 return;
@@ -579,11 +602,16 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
579 size += r_size; 602 size += r_size;
580 else 603 else
581 size1 += r_size; 604 size1 += r_size;
605
606 if (realloc_head)
607 children_add_size += get_res_add_size(realloc_head, r);
582 } 608 }
583 } 609 }
584 size0 = calculate_iosize(size, min_size, size1, 610 size0 = calculate_iosize(size, min_size, size1,
585 resource_size(b_res), 4096); 611 resource_size(b_res), 4096);
586 size1 = (!add_head || (add_head && !add_size)) ? size0 : 612 if (children_add_size > add_size)
613 add_size = children_add_size;
614 size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
587 calculate_iosize(size, min_size+add_size, size1, 615 calculate_iosize(size, min_size+add_size, size1,
588 resource_size(b_res), 4096); 616 resource_size(b_res), 4096);
589 if (!size0 && !size1) { 617 if (!size0 && !size1) {
@@ -598,8 +626,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
598 b_res->start = 4096; 626 b_res->start = 4096;
599 b_res->end = b_res->start + size0 - 1; 627 b_res->end = b_res->start + size0 - 1;
600 b_res->flags |= IORESOURCE_STARTALIGN; 628 b_res->flags |= IORESOURCE_STARTALIGN;
601 if (size1 > size0 && add_head) 629 if (size1 > size0 && realloc_head)
602 add_to_list(add_head, bus->self, b_res, size1-size0); 630 add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096);
603} 631}
604 632
605/** 633/**
@@ -608,7 +636,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
608 * @bus : the bus 636 * @bus : the bus
 609 * @min_size : the minimum memory window that must be allocated 637 * @min_size : the minimum memory window that must be allocated
610 * @add_size : additional optional memory window 638 * @add_size : additional optional memory window
611 * @add_head : track the additional memory window on this list 639 * @realloc_head : track the additional memory window on this list
612 * 640 *
613 * Calculate the size of the bus and minimal alignment which 641 * Calculate the size of the bus and minimal alignment which
614 * guarantees that all child resources fit in this size. 642 * guarantees that all child resources fit in this size.
@@ -616,7 +644,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
616static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, 644static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
617 unsigned long type, resource_size_t min_size, 645 unsigned long type, resource_size_t min_size,
618 resource_size_t add_size, 646 resource_size_t add_size,
619 struct resource_list_x *add_head) 647 struct resource_list_x *realloc_head)
620{ 648{
621 struct pci_dev *dev; 649 struct pci_dev *dev;
622 resource_size_t min_align, align, size, size0, size1; 650 resource_size_t min_align, align, size, size0, size1;
@@ -624,6 +652,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
624 int order, max_order; 652 int order, max_order;
625 struct resource *b_res = find_free_bus_resource(bus, type); 653 struct resource *b_res = find_free_bus_resource(bus, type);
626 unsigned int mem64_mask = 0; 654 unsigned int mem64_mask = 0;
655 resource_size_t children_add_size = 0;
627 656
628 if (!b_res) 657 if (!b_res)
629 return 0; 658 return 0;
@@ -645,6 +674,16 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
645 if (r->parent || (r->flags & mask) != type) 674 if (r->parent || (r->flags & mask) != type)
646 continue; 675 continue;
647 r_size = resource_size(r); 676 r_size = resource_size(r);
677#ifdef CONFIG_PCI_IOV
678 /* put SRIOV requested res to the optional list */
679 if (realloc_head && i >= PCI_IOV_RESOURCES &&
680 i <= PCI_IOV_RESOURCE_END) {
681 r->end = r->start - 1;
 682 add_to_list(realloc_head, dev, r, r_size, 0/* don't care */);
683 children_add_size += r_size;
684 continue;
685 }
686#endif
648 /* For bridges size != alignment */ 687 /* For bridges size != alignment */
649 align = pci_resource_alignment(dev, r); 688 align = pci_resource_alignment(dev, r);
650 order = __ffs(align) - 20; 689 order = __ffs(align) - 20;
@@ -665,6 +704,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
665 if (order > max_order) 704 if (order > max_order)
666 max_order = order; 705 max_order = order;
667 mem64_mask &= r->flags & IORESOURCE_MEM_64; 706 mem64_mask &= r->flags & IORESOURCE_MEM_64;
707
708 if (realloc_head)
709 children_add_size += get_res_add_size(realloc_head, r);
668 } 710 }
669 } 711 }
670 align = 0; 712 align = 0;
@@ -681,7 +723,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
681 align += aligns[order]; 723 align += aligns[order];
682 } 724 }
683 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); 725 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
684 size1 = (!add_head || (add_head && !add_size)) ? size0 : 726 if (children_add_size > add_size)
727 add_size = children_add_size;
728 size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
685 calculate_memsize(size, min_size+add_size, 0, 729 calculate_memsize(size, min_size+add_size, 0,
686 resource_size(b_res), min_align); 730 resource_size(b_res), min_align);
687 if (!size0 && !size1) { 731 if (!size0 && !size1) {
@@ -695,12 +739,22 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
695 b_res->start = min_align; 739 b_res->start = min_align;
696 b_res->end = size0 + min_align - 1; 740 b_res->end = size0 + min_align - 1;
697 b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; 741 b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask;
698 if (size1 > size0 && add_head) 742 if (size1 > size0 && realloc_head)
699 add_to_list(add_head, bus->self, b_res, size1-size0); 743 add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
700 return 1; 744 return 1;
701} 745}
702 746
703static void pci_bus_size_cardbus(struct pci_bus *bus) 747unsigned long pci_cardbus_resource_alignment(struct resource *res)
748{
749 if (res->flags & IORESOURCE_IO)
750 return pci_cardbus_io_size;
751 if (res->flags & IORESOURCE_MEM)
752 return pci_cardbus_mem_size;
753 return 0;
754}
755
756static void pci_bus_size_cardbus(struct pci_bus *bus,
757 struct resource_list_x *realloc_head)
704{ 758{
705 struct pci_dev *bridge = bus->self; 759 struct pci_dev *bridge = bus->self;
706 struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; 760 struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
@@ -711,12 +765,14 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
711 * a fixed amount of bus space for CardBus bridges. 765 * a fixed amount of bus space for CardBus bridges.
712 */ 766 */
713 b_res[0].start = 0; 767 b_res[0].start = 0;
714 b_res[0].end = pci_cardbus_io_size - 1;
715 b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; 768 b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
769 if (realloc_head)
770 add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, 0 /* dont care */);
716 771
717 b_res[1].start = 0; 772 b_res[1].start = 0;
718 b_res[1].end = pci_cardbus_io_size - 1;
719 b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; 773 b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
774 if (realloc_head)
775 add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* dont care */);
720 776
721 /* 777 /*
722 * Check whether prefetchable memory is supported 778 * Check whether prefetchable memory is supported
@@ -736,21 +792,31 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
736 */ 792 */
737 if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { 793 if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
738 b_res[2].start = 0; 794 b_res[2].start = 0;
739 b_res[2].end = pci_cardbus_mem_size - 1;
740 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN; 795 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;
796 if (realloc_head)
797 add_to_list(realloc_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* dont care */);
741 798
742 b_res[3].start = 0; 799 b_res[3].start = 0;
743 b_res[3].end = pci_cardbus_mem_size - 1;
744 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; 800 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
801 if (realloc_head)
802 add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* dont care */);
745 } else { 803 } else {
746 b_res[3].start = 0; 804 b_res[3].start = 0;
747 b_res[3].end = pci_cardbus_mem_size * 2 - 1;
748 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; 805 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
806 if (realloc_head)
807 add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* dont care */);
749 } 808 }
809
810 /* set the size of the resource to zero, so that the resource does not
811 * get assigned during required-resource allocation cycle but gets assigned
812 * during the optional-resource allocation cycle.
813 */
814 b_res[0].start = b_res[1].start = b_res[2].start = b_res[3].start = 1;
815 b_res[0].end = b_res[1].end = b_res[2].end = b_res[3].end = 0;
750} 816}
751 817
752void __ref __pci_bus_size_bridges(struct pci_bus *bus, 818void __ref __pci_bus_size_bridges(struct pci_bus *bus,
753 struct resource_list_x *add_head) 819 struct resource_list_x *realloc_head)
754{ 820{
755 struct pci_dev *dev; 821 struct pci_dev *dev;
756 unsigned long mask, prefmask; 822 unsigned long mask, prefmask;
@@ -763,12 +829,12 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
763 829
764 switch (dev->class >> 8) { 830 switch (dev->class >> 8) {
765 case PCI_CLASS_BRIDGE_CARDBUS: 831 case PCI_CLASS_BRIDGE_CARDBUS:
766 pci_bus_size_cardbus(b); 832 pci_bus_size_cardbus(b, realloc_head);
767 break; 833 break;
768 834
769 case PCI_CLASS_BRIDGE_PCI: 835 case PCI_CLASS_BRIDGE_PCI:
770 default: 836 default:
771 __pci_bus_size_bridges(b, add_head); 837 __pci_bus_size_bridges(b, realloc_head);
772 break; 838 break;
773 } 839 }
774 } 840 }
@@ -792,7 +858,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
792 * Follow thru 858 * Follow thru
793 */ 859 */
794 default: 860 default:
795 pbus_size_io(bus, 0, additional_io_size, add_head); 861 pbus_size_io(bus, 0, additional_io_size, realloc_head);
796 /* If the bridge supports prefetchable range, size it 862 /* If the bridge supports prefetchable range, size it
797 separately. If it doesn't, or its prefetchable window 863 separately. If it doesn't, or its prefetchable window
798 has already been allocated by arch code, try 864 has already been allocated by arch code, try
@@ -800,11 +866,11 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
800 resources. */ 866 resources. */
801 mask = IORESOURCE_MEM; 867 mask = IORESOURCE_MEM;
802 prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; 868 prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
803 if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, add_head)) 869 if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, realloc_head))
804 mask = prefmask; /* Success, size non-prefetch only. */ 870 mask = prefmask; /* Success, size non-prefetch only. */
805 else 871 else
806 additional_mem_size += additional_mem_size; 872 additional_mem_size += additional_mem_size;
807 pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, add_head); 873 pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, realloc_head);
808 break; 874 break;
809 } 875 }
810} 876}
@@ -816,20 +882,20 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
816EXPORT_SYMBOL(pci_bus_size_bridges); 882EXPORT_SYMBOL(pci_bus_size_bridges);
817 883
818static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, 884static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
819 struct resource_list_x *add_head, 885 struct resource_list_x *realloc_head,
820 struct resource_list_x *fail_head) 886 struct resource_list_x *fail_head)
821{ 887{
822 struct pci_bus *b; 888 struct pci_bus *b;
823 struct pci_dev *dev; 889 struct pci_dev *dev;
824 890
825 pbus_assign_resources_sorted(bus, add_head, fail_head); 891 pbus_assign_resources_sorted(bus, realloc_head, fail_head);
826 892
827 list_for_each_entry(dev, &bus->devices, bus_list) { 893 list_for_each_entry(dev, &bus->devices, bus_list) {
828 b = dev->subordinate; 894 b = dev->subordinate;
829 if (!b) 895 if (!b)
830 continue; 896 continue;
831 897
832 __pci_bus_assign_resources(b, add_head, fail_head); 898 __pci_bus_assign_resources(b, realloc_head, fail_head);
833 899
834 switch (dev->class >> 8) { 900 switch (dev->class >> 8) {
835 case PCI_CLASS_BRIDGE_PCI: 901 case PCI_CLASS_BRIDGE_PCI:
@@ -1039,7 +1105,7 @@ void __init
1039pci_assign_unassigned_resources(void) 1105pci_assign_unassigned_resources(void)
1040{ 1106{
1041 struct pci_bus *bus; 1107 struct pci_bus *bus;
1042 struct resource_list_x add_list; /* list of resources that 1108 struct resource_list_x realloc_list; /* list of resources that
1043 want additional resources */ 1109 want additional resources */
1044 int tried_times = 0; 1110 int tried_times = 0;
1045 enum release_type rel_type = leaf_only; 1111 enum release_type rel_type = leaf_only;
@@ -1052,7 +1118,7 @@ pci_assign_unassigned_resources(void)
1052 1118
1053 1119
1054 head.next = NULL; 1120 head.next = NULL;
1055 add_list.next = NULL; 1121 realloc_list.next = NULL;
1056 1122
1057 pci_try_num = max_depth + 1; 1123 pci_try_num = max_depth + 1;
1058 printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", 1124 printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
@@ -1062,12 +1128,12 @@ again:
1062 /* Depth first, calculate sizes and alignments of all 1128 /* Depth first, calculate sizes and alignments of all
1063 subordinate buses. */ 1129 subordinate buses. */
1064 list_for_each_entry(bus, &pci_root_buses, node) 1130 list_for_each_entry(bus, &pci_root_buses, node)
1065 __pci_bus_size_bridges(bus, &add_list); 1131 __pci_bus_size_bridges(bus, &realloc_list);
1066 1132
1067 /* Depth last, allocate resources and update the hardware. */ 1133 /* Depth last, allocate resources and update the hardware. */
1068 list_for_each_entry(bus, &pci_root_buses, node) 1134 list_for_each_entry(bus, &pci_root_buses, node)
1069 __pci_bus_assign_resources(bus, &add_list, &head); 1135 __pci_bus_assign_resources(bus, &realloc_list, &head);
1070 BUG_ON(add_list.next); 1136 BUG_ON(realloc_list.next);
1071 tried_times++; 1137 tried_times++;
1072 1138
1073 /* any device complain? */ 1139 /* any device complain? */
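The resource_list_x nodes now carry both the optional add_size and the min_align needed to grow a window in place, and get_res_add_size() looks a child resource up in that realloc list so the bridge window can be sized for the children's optional requests up front. A minimal sketch of the list lookup, with simplified node and resource types rather than the kernel structures:

#include <stddef.h>
#include <stdio.h>

struct res { const char *name; };

/* Simplified resource_list_x node: resource plus its optional extra size. */
struct realloc_node {
    struct realloc_node *next;
    struct res *res;
    unsigned long add_size;
    unsigned long min_align;
};

/* Model of get_res_add_size(): 0 if the resource has no optional request. */
static unsigned long get_res_add_size(struct realloc_node *head, struct res *r)
{
    struct realloc_node *n;

    for (n = head; n && n->res != r; n = n->next)
        ;
    return n ? n->add_size : 0;
}

int main(void)
{
    struct res bar = { "BAR 2" };
    struct realloc_node node = { NULL, &bar, 0x100000, 0x100000 };

    printf("optional size for %s: %#lx\n", bar.name,
           get_res_add_size(&node, &bar));
    return 0;
}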
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 319f359906e8..51a9095c7da4 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -128,16 +128,16 @@ void pci_disable_bridge_window(struct pci_dev *dev)
128} 128}
129#endif /* CONFIG_PCI_QUIRKS */ 129#endif /* CONFIG_PCI_QUIRKS */
130 130
131
132
131static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, 133static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
132 int resno) 134 int resno, resource_size_t size, resource_size_t align)
133{ 135{
134 struct resource *res = dev->resource + resno; 136 struct resource *res = dev->resource + resno;
135 resource_size_t size, min, align; 137 resource_size_t min;
136 int ret; 138 int ret;
137 139
138 size = resource_size(res);
139 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; 140 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
140 align = pci_resource_alignment(dev, res);
141 141
142 /* First, try exact prefetching match.. */ 142 /* First, try exact prefetching match.. */
143 ret = pci_bus_alloc_resource(bus, res, size, align, min, 143 ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -154,56 +154,101 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
154 ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, 154 ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
155 pcibios_align_resource, dev); 155 pcibios_align_resource, dev);
156 } 156 }
157 return ret;
158}
157 159
158 if (ret < 0 && dev->fw_addr[resno]) { 160static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
159 struct resource *root, *conflict; 161 int resno, resource_size_t size)
160 resource_size_t start, end; 162{
163 struct resource *root, *conflict;
164 resource_size_t start, end;
165 int ret = 0;
161 166
162 /* 167 if (res->flags & IORESOURCE_IO)
163 * If we failed to assign anything, let's try the address 168 root = &ioport_resource;
164 * where firmware left it. That at least has a chance of 169 else
165 * working, which is better than just leaving it disabled. 170 root = &iomem_resource;
166 */ 171
172 start = res->start;
173 end = res->end;
174 res->start = dev->fw_addr[resno];
175 res->end = res->start + size - 1;
176 dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
177 resno, res);
178 conflict = request_resource_conflict(root, res);
179 if (conflict) {
180 dev_info(&dev->dev,
181 "BAR %d: %pR conflicts with %s %pR\n", resno,
182 res, conflict->name, conflict);
183 res->start = start;
184 res->end = end;
185 ret = 1;
186 }
187 return ret;
188}
189
190static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align)
191{
192 struct resource *res = dev->resource + resno;
193 struct pci_bus *bus;
194 int ret;
195 char *type;
167 196
168 if (res->flags & IORESOURCE_IO) 197 bus = dev->bus;
169 root = &ioport_resource; 198 while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) {
199 if (!bus->parent || !bus->self->transparent)
200 break;
201 bus = bus->parent;
202 }
203
204 if (ret) {
205 if (res->flags & IORESOURCE_MEM)
206 if (res->flags & IORESOURCE_PREFETCH)
207 type = "mem pref";
208 else
209 type = "mem";
210 else if (res->flags & IORESOURCE_IO)
211 type = "io";
170 else 212 else
171 root = &iomem_resource; 213 type = "unknown";
172 214 dev_info(&dev->dev,
173 start = res->start; 215 "BAR %d: can't assign %s (size %#llx)\n",
174 end = res->end; 216 resno, type, (unsigned long long) resource_size(res));
175 res->start = dev->fw_addr[resno];
176 res->end = res->start + size - 1;
177 dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
178 resno, res);
179 conflict = request_resource_conflict(root, res);
180 if (conflict) {
181 dev_info(&dev->dev,
182 "BAR %d: %pR conflicts with %s %pR\n", resno,
183 res, conflict->name, conflict);
184 res->start = start;
185 res->end = end;
186 } else
187 ret = 0;
188 } 217 }
189 218
219 return ret;
220}
221
222int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
223 resource_size_t min_align)
224{
225 struct resource *res = dev->resource + resno;
226 resource_size_t new_size;
227 int ret;
228
229 if (!res->parent) {
 230 dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR "
231 "\n", resno, res);
232 return -EINVAL;
233 }
234
235 new_size = resource_size(res) + addsize + min_align;
236 ret = _pci_assign_resource(dev, resno, new_size, min_align);
190 if (!ret) { 237 if (!ret) {
191 res->flags &= ~IORESOURCE_STARTALIGN; 238 res->flags &= ~IORESOURCE_STARTALIGN;
192 dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); 239 dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
193 if (resno < PCI_BRIDGE_RESOURCES) 240 if (resno < PCI_BRIDGE_RESOURCES)
194 pci_update_resource(dev, resno); 241 pci_update_resource(dev, resno);
195 } 242 }
196
197 return ret; 243 return ret;
198} 244}
199 245
200int pci_assign_resource(struct pci_dev *dev, int resno) 246int pci_assign_resource(struct pci_dev *dev, int resno)
201{ 247{
202 struct resource *res = dev->resource + resno; 248 struct resource *res = dev->resource + resno;
203 resource_size_t align; 249 resource_size_t align, size;
204 struct pci_bus *bus; 250 struct pci_bus *bus;
205 int ret; 251 int ret;
206 char *type;
207 252
208 align = pci_resource_alignment(dev, res); 253 align = pci_resource_alignment(dev, res);
209 if (!align) { 254 if (!align) {
@@ -213,34 +258,27 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
213 } 258 }
214 259
215 bus = dev->bus; 260 bus = dev->bus;
216 while ((ret = __pci_assign_resource(bus, dev, resno))) { 261 size = resource_size(res);
217 if (bus->parent && bus->self->transparent) 262 ret = _pci_assign_resource(dev, resno, size, align);
218 bus = bus->parent;
219 else
220 bus = NULL;
221 if (bus)
222 continue;
223 break;
224 }
225 263
226 if (ret) { 264 /*
227 if (res->flags & IORESOURCE_MEM) 265 * If we failed to assign anything, let's try the address
228 if (res->flags & IORESOURCE_PREFETCH) 266 * where firmware left it. That at least has a chance of
229 type = "mem pref"; 267 * working, which is better than just leaving it disabled.
230 else 268 */
231 type = "mem"; 269 if (ret < 0 && dev->fw_addr[resno])
232 else if (res->flags & IORESOURCE_IO) 270 ret = pci_revert_fw_address(res, dev, resno, size);
233 type = "io";
234 else
235 type = "unknown";
236 dev_info(&dev->dev,
237 "BAR %d: can't assign %s (size %#llx)\n",
238 resno, type, (unsigned long long) resource_size(res));
239 }
240 271
272 if (!ret) {
273 res->flags &= ~IORESOURCE_STARTALIGN;
274 dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
275 if (resno < PCI_BRIDGE_RESOURCES)
276 pci_update_resource(dev, resno);
277 }
241 return ret; 278 return ret;
242} 279}
243 280
281
244/* Sort resources by alignment */ 282/* Sort resources by alignment */
245void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) 283void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
246{ 284{
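pci_assign_resource() is split into _pci_assign_resource(), which retries the allocation up through transparent parent bridges, and pci_revert_fw_address(), which falls back to the firmware-assigned address only after every bus level has been tried; pci_reassign_resource() reuses the same core to grow an already-assigned BAR by add_size plus the alignment slack. A minimal sketch of the retry-up-the-hierarchy loop, with a toy bus type and try_alloc() standing in for pci_bus_alloc_resource():

#include <stdio.h>

struct toy_bus {
    struct toy_bus *parent;
    int transparent;        /* subtractive-decode bridge: children may be
                               satisfied from the parent bus instead */
    int free_space;
};

static int try_alloc(struct toy_bus *bus, int size)
{
    return size <= bus->free_space ? 0 : -1;
}

/* Model of _pci_assign_resource(): walk up while the bridge is transparent. */
static int assign(struct toy_bus *bus, int size)
{
    int ret;

    while ((ret = try_alloc(bus, size))) {
        if (!bus->parent || !bus->transparent)
            break;
        bus = bus->parent;
    }
    return ret;
}

int main(void)
{
    struct toy_bus root = { NULL, 0, 4096 };
    struct toy_bus child = { &root, 1, 256 };

    printf("assign 1024 on child: %d\n", assign(&child, 1024)); /* 0 via root */
    return 0;
}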
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index e956f659089a..66ab92cf3105 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -17,7 +17,7 @@
17 17
18#include <asm/mach-types.h> 18#include <asm/mach-types.h>
19 19
20#include <mach/gpio.h> 20#include <asm/gpio.h>
21#include <mach/vpac270.h> 21#include <mach/vpac270.h>
22 22
23#include "soc_common.h" 23#include "soc_common.h"
diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c
index 7106b49b26e4..ffc5033ea9c9 100644
--- a/drivers/power/max8997_charger.c
+++ b/drivers/power/max8997_charger.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/module.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24#include <linux/platform_device.h> 25#include <linux/platform_device.h>
25#include <linux/power_supply.h> 26#include <linux/power_supply.h>
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c
index cc21fa2120be..ef8efadb58cb 100644
--- a/drivers/power/max8998_charger.c
+++ b/drivers/power/max8998_charger.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/module.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24#include <linux/platform_device.h> 25#include <linux/platform_device.h>
25#include <linux/power_supply.h> 26#include <linux/power_supply.h>
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index a675e31b4f13..d32d0d70f9ba 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -20,6 +20,7 @@
20#include <linux/s3c_adc_battery.h> 20#include <linux/s3c_adc_battery.h>
21#include <linux/errno.h> 21#include <linux/errno.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/module.h>
23 24
24#include <plat/adc.h> 25#include <plat/adc.h>
25 26
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index ee893581d4b7..ebe77dd87daf 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -505,8 +505,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
505 rdev->dev.dma_mask = &rdev->dma_mask; 505 rdev->dev.dma_mask = &rdev->dma_mask;
506 rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 506 rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
507 507
508 if ((rdev->pef & RIO_PEF_INB_DOORBELL) && 508 if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
509 (rdev->dst_ops & RIO_DST_OPS_DOORBELL))
510 rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], 509 rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
511 0, 0xffff); 510 0, 0xffff);
512 511
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 3195dbd3ec34..44e91e598f8d 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -639,7 +639,7 @@ EXPORT_SYMBOL_GPL(rtc_irq_unregister);
639static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled) 639static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
640{ 640{
641 /* 641 /*
642 * We unconditionally cancel the timer here, because otherwise 642 * We always cancel the timer here first, because otherwise
643 * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); 643 * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
644 * when we manage to start the timer before the callback 644 * when we manage to start the timer before the callback
645 * returns HRTIMER_RESTART. 645 * returns HRTIMER_RESTART.
@@ -708,7 +708,7 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
708 int err = 0; 708 int err = 0;
709 unsigned long flags; 709 unsigned long flags;
710 710
711 if (freq <= 0 || freq > 5000) 711 if (freq <= 0 || freq > RTC_MAX_FREQ)
712 return -EINVAL; 712 return -EINVAL;
713retry: 713retry:
714 spin_lock_irqsave(&rtc->irq_task_lock, flags); 714 spin_lock_irqsave(&rtc->irq_task_lock, flags);
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 335551d333b2..14a42a1edc66 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -36,6 +36,7 @@
36 */ 36 */
37struct ep93xx_rtc { 37struct ep93xx_rtc {
38 void __iomem *mmio_base; 38 void __iomem *mmio_base;
39 struct rtc_device *rtc;
39}; 40};
40 41
41static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, 42static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
@@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
130{ 131{
131 struct ep93xx_rtc *ep93xx_rtc; 132 struct ep93xx_rtc *ep93xx_rtc;
132 struct resource *res; 133 struct resource *res;
133 struct rtc_device *rtc;
134 int err; 134 int err;
135 135
136 ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL); 136 ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
@@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
151 return -ENXIO; 151 return -ENXIO;
152 152
153 pdev->dev.platform_data = ep93xx_rtc; 153 pdev->dev.platform_data = ep93xx_rtc;
154 platform_set_drvdata(pdev, rtc); 154 platform_set_drvdata(pdev, ep93xx_rtc);
155 155
156 rtc = rtc_device_register(pdev->name, 156 ep93xx_rtc->rtc = rtc_device_register(pdev->name,
157 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); 157 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
158 if (IS_ERR(rtc)) { 158 if (IS_ERR(ep93xx_rtc->rtc)) {
159 err = PTR_ERR(rtc); 159 err = PTR_ERR(ep93xx_rtc->rtc);
160 goto exit; 160 goto exit;
161 } 161 }
162 162
@@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
167 return 0; 167 return 0;
168 168
169fail: 169fail:
170 rtc_device_unregister(rtc); 170 rtc_device_unregister(ep93xx_rtc->rtc);
171exit: 171exit:
172 platform_set_drvdata(pdev, NULL); 172 platform_set_drvdata(pdev, NULL);
173 pdev->dev.platform_data = NULL; 173 pdev->dev.platform_data = NULL;
@@ -176,11 +176,11 @@ exit:
176 176
177static int __exit ep93xx_rtc_remove(struct platform_device *pdev) 177static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
178{ 178{
179 struct rtc_device *rtc = platform_get_drvdata(pdev); 179 struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
180 180
181 sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); 181 sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
182 platform_set_drvdata(pdev, NULL); 182 platform_set_drvdata(pdev, NULL);
183 rtc_device_unregister(rtc); 183 rtc_device_unregister(ep93xx_rtc->rtc);
184 pdev->dev.platform_data = NULL; 184 pdev->dev.platform_data = NULL;
185 185
186 return 0; 186 return 0;
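The ep93xx change moves the rtc_device handle into the driver's private state and stores that state as drvdata, so remove() can recover everything from a single pointer instead of relying on a local variable that only probe() ever saw. A hedged user-space model of the pattern, with stub types standing in for the platform-device and RTC objects (none of these names are the real API):

#include <stdlib.h>

/* Stand-ins for the kernel objects; names are illustrative only. */
struct fake_rtc  { int registered; };
struct fake_pdev { void *drvdata; };

/* Private state: everything remove() will need, including the rtc handle. */
struct ep93xx_state {
    void *mmio_base;
    struct fake_rtc *rtc;
};

static int ep_probe(struct fake_pdev *pdev)
{
    struct ep93xx_state *st = calloc(1, sizeof(*st));
    if (!st)
        return -1;

    st->rtc = calloc(1, sizeof(*st->rtc));   /* models rtc_device_register() */
    if (!st->rtc) {
        free(st);
        return -1;
    }
    st->rtc->registered = 1;

    pdev->drvdata = st;                      /* models platform_set_drvdata() */
    return 0;
}

static void ep_remove(struct fake_pdev *pdev)
{
    struct ep93xx_state *st = pdev->drvdata; /* models platform_get_drvdata() */

    st->rtc->registered = 0;                 /* models rtc_device_unregister() */
    free(st->rtc);
    free(st);
    pdev->drvdata = NULL;
}

int main(void)
{
    struct fake_pdev pdev = { 0 };

    if (ep_probe(&pdev) == 0)
        ep_remove(&pdev);
    return 0;
}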
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 2dd3c0163272..d93a9608b1f0 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -35,6 +35,7 @@
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/rtc.h> 37#include <linux/rtc.h>
38#include <linux/sched.h>
38#include <linux/workqueue.h> 39#include <linux/workqueue.h>
39 40
40/* DryIce Register Definitions */ 41/* DryIce Register Definitions */
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 075f1708deae..c4cf05731118 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
85 time -= tm->tm_hour * 3600; 85 time -= tm->tm_hour * 3600;
86 tm->tm_min = time / 60; 86 tm->tm_min = time / 60;
87 tm->tm_sec = time - tm->tm_min * 60; 87 tm->tm_sec = time - tm->tm_min * 60;
88
89 tm->tm_isdst = 0;
88} 90}
89EXPORT_SYMBOL(rtc_time_to_tm); 91EXPORT_SYMBOL(rtc_time_to_tm);
90 92
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 9329dbb9ebab..7639ab906f02 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -51,6 +51,27 @@ static enum s3c_cpu_type s3c_rtc_cpu_type;
51 51
52static DEFINE_SPINLOCK(s3c_rtc_pie_lock); 52static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
53 53
54static void s3c_rtc_alarm_clk_enable(bool enable)
55{
56 static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock);
57 static bool alarm_clk_enabled;
58 unsigned long irq_flags;
59
60 spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags);
61 if (enable) {
62 if (!alarm_clk_enabled) {
63 clk_enable(rtc_clk);
64 alarm_clk_enabled = true;
65 }
66 } else {
67 if (alarm_clk_enabled) {
68 clk_disable(rtc_clk);
69 alarm_clk_enabled = false;
70 }
71 }
72 spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags);
73}
74
54/* IRQ Handlers */ 75/* IRQ Handlers */
55 76
56static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) 77static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
@@ -64,6 +85,9 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
64 writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP); 85 writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP);
65 86
66 clk_disable(rtc_clk); 87 clk_disable(rtc_clk);
88
89 s3c_rtc_alarm_clk_enable(false);
90
67 return IRQ_HANDLED; 91 return IRQ_HANDLED;
68} 92}
69 93
@@ -97,6 +121,8 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
97 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); 121 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
98 clk_disable(rtc_clk); 122 clk_disable(rtc_clk);
99 123
124 s3c_rtc_alarm_clk_enable(enabled);
125
100 return 0; 126 return 0;
101} 127}
102 128
@@ -152,10 +178,6 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
152 goto retry_get_time; 178 goto retry_get_time;
153 } 179 }
154 180
155 pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
156 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
157 rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
158
159 rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); 181 rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
160 rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); 182 rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
161 rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); 183 rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
@@ -164,6 +186,11 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
164 rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); 186 rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
165 187
166 rtc_tm->tm_year += 100; 188 rtc_tm->tm_year += 100;
189
190 pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
191 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
192 rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
193
167 rtc_tm->tm_mon -= 1; 194 rtc_tm->tm_mon -= 1;
168 195
169 clk_disable(rtc_clk); 196 clk_disable(rtc_clk);
@@ -269,10 +296,9 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
269 clk_enable(rtc_clk); 296 clk_enable(rtc_clk);
270 pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n", 297 pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
271 alrm->enabled, 298 alrm->enabled,
272 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, 299 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
273 tm->tm_hour, tm->tm_min, tm->tm_sec); 300 tm->tm_hour, tm->tm_min, tm->tm_sec);
274 301
275
276 alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; 302 alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
277 writeb(0x00, base + S3C2410_RTCALM); 303 writeb(0x00, base + S3C2410_RTCALM);
278 304
@@ -319,49 +345,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
319 return 0; 345 return 0;
320} 346}
321 347
322static int s3c_rtc_open(struct device *dev)
323{
324 struct platform_device *pdev = to_platform_device(dev);
325 struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
326 int ret;
327
328 ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq,
329 IRQF_DISABLED, "s3c2410-rtc alarm", rtc_dev);
330
331 if (ret) {
332 dev_err(dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret);
333 return ret;
334 }
335
336 ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq,
337 IRQF_DISABLED, "s3c2410-rtc tick", rtc_dev);
338
339 if (ret) {
340 dev_err(dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret);
341 goto tick_err;
342 }
343
344 return ret;
345
346 tick_err:
347 free_irq(s3c_rtc_alarmno, rtc_dev);
348 return ret;
349}
350
351static void s3c_rtc_release(struct device *dev)
352{
353 struct platform_device *pdev = to_platform_device(dev);
354 struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
355
356 /* do not clear AIE here, it may be needed for wake */
357
358 free_irq(s3c_rtc_alarmno, rtc_dev);
359 free_irq(s3c_rtc_tickno, rtc_dev);
360}
361
362static const struct rtc_class_ops s3c_rtcops = { 348static const struct rtc_class_ops s3c_rtcops = {
363 .open = s3c_rtc_open,
364 .release = s3c_rtc_release,
365 .read_time = s3c_rtc_gettime, 349 .read_time = s3c_rtc_gettime,
366 .set_time = s3c_rtc_settime, 350 .set_time = s3c_rtc_settime,
367 .read_alarm = s3c_rtc_getalarm, 351 .read_alarm = s3c_rtc_getalarm,
@@ -425,6 +409,9 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
425{ 409{
426 struct rtc_device *rtc = platform_get_drvdata(dev); 410 struct rtc_device *rtc = platform_get_drvdata(dev);
427 411
412 free_irq(s3c_rtc_alarmno, rtc);
413 free_irq(s3c_rtc_tickno, rtc);
414
428 platform_set_drvdata(dev, NULL); 415 platform_set_drvdata(dev, NULL);
429 rtc_device_unregister(rtc); 416 rtc_device_unregister(rtc);
430 417
@@ -548,10 +535,32 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
548 535
549 s3c_rtc_setfreq(&pdev->dev, 1); 536 s3c_rtc_setfreq(&pdev->dev, 1);
550 537
538 ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq,
539 IRQF_DISABLED, "s3c2410-rtc alarm", rtc);
540 if (ret) {
541 dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret);
542 goto err_alarm_irq;
543 }
544
545 ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq,
546 IRQF_DISABLED, "s3c2410-rtc tick", rtc);
547 if (ret) {
548 dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret);
549 free_irq(s3c_rtc_alarmno, rtc);
550 goto err_tick_irq;
551 }
552
551 clk_disable(rtc_clk); 553 clk_disable(rtc_clk);
552 554
553 return 0; 555 return 0;
554 556
557 err_tick_irq:
558 free_irq(s3c_rtc_alarmno, rtc);
559
560 err_alarm_irq:
561 platform_set_drvdata(pdev, NULL);
562 rtc_device_unregister(rtc);
563
555 err_nortc: 564 err_nortc:
556 s3c_rtc_enable(pdev, 0); 565 s3c_rtc_enable(pdev, 0);
557 clk_disable(rtc_clk); 566 clk_disable(rtc_clk);
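The new s3c_rtc_alarm_clk_enable() helper above turns repeated enable/disable requests into at most one real clock transition, guarded by a lock. A minimal user-space sketch of that guarded-toggle pattern, using a pthread mutex as a stand-in for the driver's spinlock and print statements in place of clk_enable()/clk_disable():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t clk_lock = PTHREAD_MUTEX_INITIALIZER;
static bool clk_enabled;

static void clk_enable(void)  { puts("clk_enable"); }
static void clk_disable(void) { puts("clk_disable"); }

/* An on/off request becomes at most one real transition; the lock keeps
 * concurrent callers from double-enabling or double-disabling. */
static void alarm_clk_enable(bool enable)
{
    pthread_mutex_lock(&clk_lock);
    if (enable) {
        if (!clk_enabled) {
            clk_enable();
            clk_enabled = true;
        }
    } else {
        if (clk_enabled) {
            clk_disable();
            clk_enabled = false;
        }
    }
    pthread_mutex_unlock(&clk_lock);
}

int main(void)
{
    alarm_clk_enable(true);   /* enables the clock */
    alarm_clk_enable(true);   /* no effect: already enabled */
    alarm_clk_enable(false);  /* disables it once */
    alarm_clk_enable(false);  /* no effect */
    return 0;
}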
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 9a81f778d6b2..20687d55e7a7 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
362 int res; 362 int res;
363 u8 rd_reg; 363 u8 rd_reg;
364 364
365#ifdef CONFIG_LOCKDEP
366 /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
367 * we don't want and can't tolerate. Although it might be
368 * friendlier not to borrow this thread context...
369 */
370 local_irq_enable();
371#endif
372
373 res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); 365 res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
374 if (res) 366 if (res)
375 goto out; 367 goto out;
@@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = {
428static int __devinit twl_rtc_probe(struct platform_device *pdev) 420static int __devinit twl_rtc_probe(struct platform_device *pdev)
429{ 421{
430 struct rtc_device *rtc; 422 struct rtc_device *rtc;
431 int ret = 0; 423 int ret = -EINVAL;
432 int irq = platform_get_irq(pdev, 0); 424 int irq = platform_get_irq(pdev, 0);
433 u8 rd_reg; 425 u8 rd_reg;
434 426
435 if (irq <= 0) 427 if (irq <= 0)
436 return -EINVAL; 428 goto out1;
437
438 rtc = rtc_device_register(pdev->name,
439 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
440 if (IS_ERR(rtc)) {
441 ret = PTR_ERR(rtc);
442 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
443 PTR_ERR(rtc));
444 goto out0;
445
446 }
447
448 platform_set_drvdata(pdev, rtc);
449 429
450 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); 430 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
451 if (ret < 0) 431 if (ret < 0)
@@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
462 if (ret < 0) 442 if (ret < 0)
463 goto out1; 443 goto out1;
464 444
465 ret = request_irq(irq, twl_rtc_interrupt,
466 IRQF_TRIGGER_RISING,
467 dev_name(&rtc->dev), rtc);
468 if (ret < 0) {
469 dev_err(&pdev->dev, "IRQ is not free.\n");
470 goto out1;
471 }
472
473 if (twl_class_is_6030()) { 445 if (twl_class_is_6030()) {
474 twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, 446 twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
475 REG_INT_MSK_LINE_A); 447 REG_INT_MSK_LINE_A);
@@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
480 /* Check RTC module status, Enable if it is off */ 452 /* Check RTC module status, Enable if it is off */
481 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG); 453 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
482 if (ret < 0) 454 if (ret < 0)
483 goto out2; 455 goto out1;
484 456
485 if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) { 457 if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
486 dev_info(&pdev->dev, "Enabling TWL-RTC.\n"); 458 dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
487 rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M; 459 rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
488 ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG); 460 ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
489 if (ret < 0) 461 if (ret < 0)
490 goto out2; 462 goto out1;
491 } 463 }
492 464
493 /* init cached IRQ enable bits */ 465 /* init cached IRQ enable bits */
494 ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); 466 ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
495 if (ret < 0) 467 if (ret < 0)
468 goto out1;
469
470 rtc = rtc_device_register(pdev->name,
471 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
472 if (IS_ERR(rtc)) {
473 ret = PTR_ERR(rtc);
474 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
475 PTR_ERR(rtc));
476 goto out1;
477 }
478
479 ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
480 IRQF_TRIGGER_RISING,
481 dev_name(&rtc->dev), rtc);
482 if (ret < 0) {
483 dev_err(&pdev->dev, "IRQ is not free.\n");
496 goto out2; 484 goto out2;
485 }
497 486
498 return ret; 487 platform_set_drvdata(pdev, rtc);
488 return 0;
499 489
500out2: 490out2:
501 free_irq(irq, rtc);
502out1:
503 rtc_device_unregister(rtc); 491 rtc_device_unregister(rtc);
504out0: 492out1:
505 return ret; 493 return ret;
506} 494}
507 495
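The twl probe rework above registers the RTC device and requests the (now threaded) IRQ only after the hardware checks succeed, and unwinds in reverse order on failure, so the interrupt handler can never see a half-initialized device. A small user-space sketch of that ordering and goto-based unwinding; the step functions are placeholders, not the driver's calls:

#include <stdio.h>

static int  check_hardware(void)    { return 0; }
static int  register_device(void)   { puts("register device"); return 0; }
static void unregister_device(void) { puts("unregister device"); }
static int  request_interrupt(void) { puts("request irq"); return 0; }

static int probe(void)
{
    int ret;

    ret = check_hardware();        /* cheap checks first, nothing to undo */
    if (ret)
        goto out;

    ret = register_device();       /* now the device exists ...           */
    if (ret)
        goto out;

    ret = request_interrupt();     /* ... and only then can the IRQ fire  */
    if (ret)
        goto out_unregister;

    return 0;

out_unregister:
    unregister_device();           /* undo in reverse order */
out:
    return ret;
}

int main(void)
{
    return probe() ? 1 : 0;
}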
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index eb4e034378cd..f1a2016829fc 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -249,6 +249,7 @@ static int dasd_ioctl_reset_profile(struct dasd_block *block)
249static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp) 249static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
250{ 250{
251 struct dasd_profile_info_t *data; 251 struct dasd_profile_info_t *data;
252 int rc = 0;
252 253
253 data = kmalloc(sizeof(*data), GFP_KERNEL); 254 data = kmalloc(sizeof(*data), GFP_KERNEL);
254 if (!data) 255 if (!data)
@@ -279,11 +280,14 @@ static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
279 spin_unlock_bh(&block->profile.lock); 280 spin_unlock_bh(&block->profile.lock);
280 } else { 281 } else {
281 spin_unlock_bh(&block->profile.lock); 282 spin_unlock_bh(&block->profile.lock);
282 return -EIO; 283 rc = -EIO;
284 goto out;
283 } 285 }
284 if (copy_to_user(argp, data, sizeof(*data))) 286 if (copy_to_user(argp, data, sizeof(*data)))
285 return -EFAULT; 287 rc = -EFAULT;
286 return 0; 288out:
289 kfree(data);
290 return rc;
287} 291}
288#else 292#else
289static int dasd_ioctl_reset_profile(struct dasd_block *block) 293static int dasd_ioctl_reset_profile(struct dasd_block *block)
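The dasd_ioctl_read_profile() change above converts two early returns into rc assignments plus a goto, so the buffer allocated at the top of the function is always freed. A compact user-space sketch of that single-exit cleanup style, with malloc/free standing in for kmalloc/kfree and a plain memcpy standing in for copy_to_user():

#include <stdlib.h>
#include <string.h>

static int read_profile(char *dst, size_t len, int have_data)
{
    int rc = 0;
    char *data = malloc(len);

    if (!data)
        return -1;              /* nothing allocated yet, plain return is fine */

    if (!have_data) {
        rc = -1;                /* was "return -EIO" before the fix: a leak */
        goto out;
    }

    memset(data, 0xab, len);
    memcpy(dst, data, len);     /* models copy_to_user() */
out:
    free(data);                 /* every path funnels through here */
    return rc;
}

int main(void)
{
    char buf[16];
    return read_profile(buf, sizeof(buf), 1);
}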
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index be55fb2b1b1c..837e010299a8 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -383,8 +383,10 @@ static int sclp_attach_storage(u8 id)
383 switch (sccb->header.response_code) { 383 switch (sccb->header.response_code) {
384 case 0x0020: 384 case 0x0020:
385 set_bit(id, sclp_storage_ids); 385 set_bit(id, sclp_storage_ids);
386 for (i = 0; i < sccb->assigned; i++) 386 for (i = 0; i < sccb->assigned; i++) {
387 sclp_unassign_storage(sccb->entries[i] >> 16); 387 if (sccb->entries[i])
388 sclp_unassign_storage(sccb->entries[i] >> 16);
389 }
388 break; 390 break;
389 default: 391 default:
390 rc = -EIO; 392 rc = -EIO;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index cbde448f9947..eb3140ee821e 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -654,8 +654,8 @@ static struct io_subchannel_private console_priv;
654static int console_subchannel_in_use; 654static int console_subchannel_in_use;
655 655
656/* 656/*
657 * Use tpi to get a pending interrupt, call the interrupt handler and 657 * Use cio_tpi to get a pending interrupt and call the interrupt handler.
658 * return a pointer to the subchannel structure. 658 * Return non-zero if an interrupt was processed, zero otherwise.
659 */ 659 */
660static int cio_tpi(void) 660static int cio_tpi(void)
661{ 661{
@@ -667,6 +667,10 @@ static int cio_tpi(void)
667 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; 667 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
668 if (tpi(NULL) != 1) 668 if (tpi(NULL) != 1)
669 return 0; 669 return 0;
670 if (tpi_info->adapter_IO) {
671 do_adapter_IO(tpi_info->isc);
672 return 1;
673 }
670 irb = (struct irb *)&S390_lowcore.irb; 674 irb = (struct irb *)&S390_lowcore.irb;
671 /* Store interrupt response block to lowcore. */ 675 /* Store interrupt response block to lowcore. */
672 if (tsch(tpi_info->schid, irb) != 0) 676 if (tsch(tpi_info->schid, irb) != 0)
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index b7bd5b0cc7aa..3868ab2397c6 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1800,10 +1800,12 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
1800 switch (retval) { 1800 switch (retval) {
1801 case SCSI_MLQUEUE_HOST_BUSY: 1801 case SCSI_MLQUEUE_HOST_BUSY:
1802 twa_free_request_id(tw_dev, request_id); 1802 twa_free_request_id(tw_dev, request_id);
1803 twa_unmap_scsi_data(tw_dev, request_id);
1803 break; 1804 break;
1804 case 1: 1805 case 1:
1805 tw_dev->state[request_id] = TW_S_COMPLETED; 1806 tw_dev->state[request_id] = TW_S_COMPLETED;
1806 twa_free_request_id(tw_dev, request_id); 1807 twa_free_request_id(tw_dev, request_id);
1808 twa_unmap_scsi_data(tw_dev, request_id);
1807 SCpnt->result = (DID_ERROR << 16); 1809 SCpnt->result = (DID_ERROR << 16);
1808 done(SCpnt); 1810 done(SCpnt);
1809 retval = 0; 1811 retval = 0;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8d9dae89f065..3878b7395081 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -837,6 +837,7 @@ config SCSI_ISCI
837 # (temporary): known alpha quality driver 837 # (temporary): known alpha quality driver
838 depends on EXPERIMENTAL 838 depends on EXPERIMENTAL
839 select SCSI_SAS_LIBSAS 839 select SCSI_SAS_LIBSAS
840 select SCSI_SAS_HOST_SMP
840 ---help--- 841 ---help---
841 This driver supports the 6Gb/s SAS capabilities of the storage 842 This driver supports the 6Gb/s SAS capabilities of the storage
842 control unit found in the Intel(R) C600 series chipset. 843 control unit found in the Intel(R) C600 series chipset.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 3c08f5352b2d..6153a66a8a31 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -88,7 +88,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
88obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o 88obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
89obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o 89obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
90obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ 90obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
91obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/ 91obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
92obj-$(CONFIG_SCSI_LPFC) += lpfc/ 92obj-$(CONFIG_SCSI_LPFC) += lpfc/
93obj-$(CONFIG_SCSI_BFA_FC) += bfa/ 93obj-$(CONFIG_SCSI_BFA_FC) += bfa/
94obj-$(CONFIG_SCSI_PAS16) += pas16.o 94obj-$(CONFIG_SCSI_PAS16) += pas16.o
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e7d0d47b9185..e5f2d7d9002e 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1283,6 +1283,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1283 kfree(aac->queues); 1283 kfree(aac->queues);
1284 aac->queues = NULL; 1284 aac->queues = NULL;
1285 free_irq(aac->pdev->irq, aac); 1285 free_irq(aac->pdev->irq, aac);
1286 if (aac->msi)
1287 pci_disable_msi(aac->pdev);
1286 kfree(aac->fsa_dev); 1288 kfree(aac->fsa_dev);
1287 aac->fsa_dev = NULL; 1289 aac->fsa_dev = NULL;
1288 quirks = aac_get_driver_ident(index)->quirks; 1290 quirks = aac_get_driver_ident(index)->quirks;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 9ae80cd5953b..dba72a4e6a1c 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -563,7 +563,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
563 nopout_wqe->itt = ((u16)task->itt | 563 nopout_wqe->itt = ((u16)task->itt |
564 (ISCSI_TASK_TYPE_MPATH << 564 (ISCSI_TASK_TYPE_MPATH <<
565 ISCSI_TMF_REQUEST_TYPE_SHIFT)); 565 ISCSI_TMF_REQUEST_TYPE_SHIFT));
566 nopout_wqe->ttt = nopout_hdr->ttt; 566 nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt);
567 nopout_wqe->flags = 0; 567 nopout_wqe->flags = 0;
568 if (!unsol) 568 if (!unsol)
569 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; 569 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
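The bnx2i one-liner above wraps the target transfer tag in be32_to_cpu() before it is stored into the CPU-order work-queue entry. A tiny user-space illustration of why the explicit conversion matters on little-endian hosts, using ntohl() as the user-space analogue of be32_to_cpu():

#include <arpa/inet.h>   /* ntohl()/htonl(): user-space byte-order helpers */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* A big-endian 32-bit field as it would sit in an iSCSI header. */
    uint32_t wire_ttt = htonl(0x12345678u);

    /* Wrong: copying the raw big-endian value into a CPU-order field
     * silently byte-swaps it on little-endian machines. */
    uint32_t bad = wire_ttt;

    /* Right: convert explicitly, as the fix does with be32_to_cpu(). */
    uint32_t good = ntohl(wire_ttt);

    printf("raw copy: 0x%08x, converted: 0x%08x\n",
           (unsigned)bad, (unsigned)good);
    return 0;
}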
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index bd22041e2789..f58644850333 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -913,7 +913,7 @@ static void l2t_put(struct cxgbi_sock *csk)
913 struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; 913 struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
914 914
915 if (csk->l2t) { 915 if (csk->l2t) {
916 l2t_release(L2DATA(t3dev), csk->l2t); 916 l2t_release(t3dev, csk->l2t);
917 csk->l2t = NULL; 917 csk->l2t = NULL;
918 cxgbi_sock_put(csk); 918 cxgbi_sock_put(csk);
919 } 919 }
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index ba710e350ac5..5d0e9a24ae94 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -432,6 +432,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
432 u8 flogi_maddr[ETH_ALEN]; 432 u8 flogi_maddr[ETH_ALEN];
433 const struct net_device_ops *ops; 433 const struct net_device_ops *ops;
434 434
435 rtnl_lock();
436
435 /* 437 /*
436 * Don't listen for Ethernet packets anymore. 438 * Don't listen for Ethernet packets anymore.
437 * synchronize_net() ensures that the packet handlers are not running 439 * synchronize_net() ensures that the packet handlers are not running
@@ -461,6 +463,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
461 " specific feature for LLD.\n"); 463 " specific feature for LLD.\n");
462 } 464 }
463 465
466 rtnl_unlock();
467
464 /* Release the self-reference taken during fcoe_interface_create() */ 468 /* Release the self-reference taken during fcoe_interface_create() */
465 fcoe_interface_put(fcoe); 469 fcoe_interface_put(fcoe);
466} 470}
@@ -1951,11 +1955,8 @@ static void fcoe_destroy_work(struct work_struct *work)
1951 fcoe_if_destroy(port->lport); 1955 fcoe_if_destroy(port->lport);
1952 1956
1953 /* Do not tear down the fcoe interface for NPIV port */ 1957 /* Do not tear down the fcoe interface for NPIV port */
1954 if (!npiv) { 1958 if (!npiv)
1955 rtnl_lock();
1956 fcoe_interface_cleanup(fcoe); 1959 fcoe_interface_cleanup(fcoe);
1957 rtnl_unlock();
1958 }
1959 1960
1960 mutex_unlock(&fcoe_config_mutex); 1961 mutex_unlock(&fcoe_config_mutex);
1961} 1962}
@@ -2009,8 +2010,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2009 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", 2010 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
2010 netdev->name); 2011 netdev->name);
2011 rc = -EIO; 2012 rc = -EIO;
2013 rtnl_unlock();
2012 fcoe_interface_cleanup(fcoe); 2014 fcoe_interface_cleanup(fcoe);
2013 goto out_nodev; 2015 goto out_nortnl;
2014 } 2016 }
2015 2017
2016 /* Make this the "master" N_Port */ 2018 /* Make this the "master" N_Port */
@@ -2027,6 +2029,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2027 2029
2028out_nodev: 2030out_nodev:
2029 rtnl_unlock(); 2031 rtnl_unlock();
2032out_nortnl:
2030 mutex_unlock(&fcoe_config_mutex); 2033 mutex_unlock(&fcoe_config_mutex);
2031 return rc; 2034 return rc;
2032} 2035}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index ec61bdb833ac..b200b736b000 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -676,6 +676,16 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
676 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); 676 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
677 removed[*nremoved] = h->dev[entry]; 677 removed[*nremoved] = h->dev[entry];
678 (*nremoved)++; 678 (*nremoved)++;
679
680 /*
681 * New physical devices won't have target/lun assigned yet
682 * so we need to preserve the values in the slot we are replacing.
683 */
684 if (new_entry->target == -1) {
685 new_entry->target = h->dev[entry]->target;
686 new_entry->lun = h->dev[entry]->lun;
687 }
688
679 h->dev[entry] = new_entry; 689 h->dev[entry] = new_entry;
680 added[*nadded] = new_entry; 690 added[*nadded] = new_entry;
681 (*nadded)++; 691 (*nadded)++;
@@ -1548,10 +1558,17 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1548} 1558}
1549 1559
1550static int hpsa_update_device_info(struct ctlr_info *h, 1560static int hpsa_update_device_info(struct ctlr_info *h,
1551 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) 1561 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
1562 unsigned char *is_OBDR_device)
1552{ 1563{
1553#define OBDR_TAPE_INQ_SIZE 49 1564
1565#define OBDR_SIG_OFFSET 43
1566#define OBDR_TAPE_SIG "$DR-10"
1567#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
1568#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
1569
1554 unsigned char *inq_buff; 1570 unsigned char *inq_buff;
1571 unsigned char *obdr_sig;
1555 1572
1556 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1573 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1557 if (!inq_buff) 1574 if (!inq_buff)
@@ -1583,6 +1600,16 @@ static int hpsa_update_device_info(struct ctlr_info *h,
1583 else 1600 else
1584 this_device->raid_level = RAID_UNKNOWN; 1601 this_device->raid_level = RAID_UNKNOWN;
1585 1602
1603 if (is_OBDR_device) {
1604 /* See if this is a One-Button-Disaster-Recovery device
1605 * by looking for "$DR-10" at offset 43 in inquiry data.
1606 */
1607 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
1608 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
1609 strncmp(obdr_sig, OBDR_TAPE_SIG,
1610 OBDR_SIG_LEN) == 0);
1611 }
1612
1586 kfree(inq_buff); 1613 kfree(inq_buff);
1587 return 0; 1614 return 0;
1588 1615
@@ -1716,7 +1743,7 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1716 return 0; 1743 return 0;
1717 } 1744 }
1718 1745
1719 if (hpsa_update_device_info(h, scsi3addr, this_device)) 1746 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
1720 return 0; 1747 return 0;
1721 (*nmsa2xxx_enclosures)++; 1748 (*nmsa2xxx_enclosures)++;
1722 hpsa_set_bus_target_lun(this_device, bus, target, 0); 1749 hpsa_set_bus_target_lun(this_device, bus, target, 0);
@@ -1808,7 +1835,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1808 */ 1835 */
1809 struct ReportLUNdata *physdev_list = NULL; 1836 struct ReportLUNdata *physdev_list = NULL;
1810 struct ReportLUNdata *logdev_list = NULL; 1837 struct ReportLUNdata *logdev_list = NULL;
1811 unsigned char *inq_buff = NULL;
1812 u32 nphysicals = 0; 1838 u32 nphysicals = 0;
1813 u32 nlogicals = 0; 1839 u32 nlogicals = 0;
1814 u32 ndev_allocated = 0; 1840 u32 ndev_allocated = 0;
@@ -1824,11 +1850,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1824 GFP_KERNEL); 1850 GFP_KERNEL);
1825 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1851 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1826 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1852 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1827 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1828 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 1853 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1829 1854
1830 if (!currentsd || !physdev_list || !logdev_list || 1855 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
1831 !inq_buff || !tmpdevice) {
1832 dev_err(&h->pdev->dev, "out of memory\n"); 1856 dev_err(&h->pdev->dev, "out of memory\n");
1833 goto out; 1857 goto out;
1834 } 1858 }
@@ -1863,7 +1887,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1863 /* adjust our table of devices */ 1887 /* adjust our table of devices */
1864 nmsa2xxx_enclosures = 0; 1888 nmsa2xxx_enclosures = 0;
1865 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 1889 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1866 u8 *lunaddrbytes; 1890 u8 *lunaddrbytes, is_OBDR = 0;
1867 1891
1868 /* Figure out where the LUN ID info is coming from */ 1892 /* Figure out where the LUN ID info is coming from */
1869 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 1893 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
@@ -1874,7 +1898,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1874 continue; 1898 continue;
1875 1899
1876 /* Get device type, vendor, model, device id */ 1900 /* Get device type, vendor, model, device id */
1877 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) 1901 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
1902 &is_OBDR))
1878 continue; /* skip it if we can't talk to it. */ 1903 continue; /* skip it if we can't talk to it. */
1879 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, 1904 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
1880 tmpdevice); 1905 tmpdevice);
@@ -1898,7 +1923,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1898 hpsa_set_bus_target_lun(this_device, bus, target, lun); 1923 hpsa_set_bus_target_lun(this_device, bus, target, lun);
1899 1924
1900 switch (this_device->devtype) { 1925 switch (this_device->devtype) {
1901 case TYPE_ROM: { 1926 case TYPE_ROM:
1902 /* We don't *really* support actual CD-ROM devices, 1927 /* We don't *really* support actual CD-ROM devices,
1903 * just "One Button Disaster Recovery" tape drive 1928 * just "One Button Disaster Recovery" tape drive
1904 * which temporarily pretends to be a CD-ROM drive. 1929 * which temporarily pretends to be a CD-ROM drive.
@@ -1906,15 +1931,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1906 * device by checking for "$DR-10" in bytes 43-48 of 1931 * device by checking for "$DR-10" in bytes 43-48 of
1907 * the inquiry data. 1932 * the inquiry data.
1908 */ 1933 */
1909 char obdr_sig[7]; 1934 if (is_OBDR)
1910#define OBDR_TAPE_SIG "$DR-10" 1935 ncurrent++;
1911 strncpy(obdr_sig, &inq_buff[43], 6);
1912 obdr_sig[6] = '\0';
1913 if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
1914 /* Not OBDR device, ignore it. */
1915 break;
1916 }
1917 ncurrent++;
1918 break; 1936 break;
1919 case TYPE_DISK: 1937 case TYPE_DISK:
1920 if (i < nphysicals) 1938 if (i < nphysicals)
@@ -1947,7 +1965,6 @@ out:
1947 for (i = 0; i < ndev_allocated; i++) 1965 for (i = 0; i < ndev_allocated; i++)
1948 kfree(currentsd[i]); 1966 kfree(currentsd[i]);
1949 kfree(currentsd); 1967 kfree(currentsd);
1950 kfree(inq_buff);
1951 kfree(physdev_list); 1968 kfree(physdev_list);
1952 kfree(logdev_list); 1969 kfree(logdev_list);
1953} 1970}
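The hpsa rework above folds the One-Button-Disaster-Recovery check into hpsa_update_device_info(): the inquiry buffer is sized from the signature offset plus the signature length, and "$DR-10" is compared at byte 43. A self-contained user-space sketch of that signature test, reusing the same macro names purely for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define OBDR_SIG_OFFSET    43
#define OBDR_TAPE_SIG      "$DR-10"
#define OBDR_SIG_LEN       (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

/* Returns true if the inquiry buffer carries the OBDR signature. */
static bool is_obdr_device(const unsigned char *inq, size_t len)
{
    if (len < OBDR_TAPE_INQ_SIZE)
        return false;
    return memcmp(inq + OBDR_SIG_OFFSET, OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0;
}

int main(void)
{
    unsigned char inq[OBDR_TAPE_INQ_SIZE] = { 0 };

    memcpy(inq + OBDR_SIG_OFFSET, OBDR_TAPE_SIG, OBDR_SIG_LEN);
    printf("OBDR device: %s\n",
           is_obdr_device(inq, sizeof(inq)) ? "yes" : "no");
    return 0;
}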
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 26072f1e9852..6981b773a88d 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -531,6 +531,9 @@ static void sci_controller_process_completions(struct isci_host *ihost)
531 break; 531 break;
532 532
533 case SCU_COMPLETION_TYPE_EVENT: 533 case SCU_COMPLETION_TYPE_EVENT:
534 sci_controller_event_completion(ihost, ent);
535 break;
536
534 case SCU_COMPLETION_TYPE_NOTIFY: { 537 case SCU_COMPLETION_TYPE_NOTIFY: {
535 event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) << 538 event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
536 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); 539 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
@@ -1091,6 +1094,7 @@ static void isci_host_completion_routine(unsigned long data)
1091 struct isci_request *request; 1094 struct isci_request *request;
1092 struct isci_request *next_request; 1095 struct isci_request *next_request;
1093 struct sas_task *task; 1096 struct sas_task *task;
1097 u16 active;
1094 1098
1095 INIT_LIST_HEAD(&completed_request_list); 1099 INIT_LIST_HEAD(&completed_request_list);
1096 INIT_LIST_HEAD(&errored_request_list); 1100 INIT_LIST_HEAD(&errored_request_list);
@@ -1181,6 +1185,13 @@ static void isci_host_completion_routine(unsigned long data)
1181 } 1185 }
1182 } 1186 }
1183 1187
1188 /* the coalesence timeout doubles at each encoding step, so
1189 * update it based on the ilog2 value of the outstanding requests
1190 */
1191 active = isci_tci_active(ihost);
1192 writel(SMU_ICC_GEN_VAL(NUMBER, active) |
1193 SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
1194 &ihost->smu_registers->interrupt_coalesce_control);
1184} 1195}
1185 1196
1186/** 1197/**
@@ -1471,7 +1482,7 @@ static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
1471 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1482 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1472 1483
1473 /* set the default interrupt coalescence number and timeout value. */ 1484 /* set the default interrupt coalescence number and timeout value. */
1474 sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); 1485 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1475} 1486}
1476 1487
1477static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) 1488static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
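The isci completion-routine hunk above rescales the interrupt coalescence timer from the log2 of the number of outstanding requests, since each encoding step doubles the timeout. A rough user-space sketch of that scaling; the base value and the plain integer result are illustrative, and the real driver packs the value with SMU_ICC_GEN_VAL() before writing the register:

#include <stdio.h>

#define COALESCE_BASE 9   /* illustrative base encoding step */

/* User-space stand-in for ilog2(): floor of log2(x), undefined for 0. */
static unsigned int ilog2_u32(unsigned int x)
{
    unsigned int r = 0;
    while (x >>= 1)
        r++;
    return r;
}

/* Lightly loaded controllers get interrupts quickly; busy ones batch them. */
static unsigned int coalesce_timer(unsigned int active_requests)
{
    if (active_requests == 0)
        return COALESCE_BASE;
    return COALESCE_BASE + ilog2_u32(active_requests);
}

int main(void)
{
    unsigned int loads[] = { 1, 4, 64, 256 };

    for (unsigned int i = 0; i < sizeof(loads) / sizeof(loads[0]); i++)
        printf("%3u active -> timer encoding %u\n",
               loads[i], coalesce_timer(loads[i]));
    return 0;
}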
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 062101a39f79..9f33831a2f04 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -369,6 +369,9 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
369#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1)) 369#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
370#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) 370#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
371 371
372/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */
373#define ISCI_COALESCE_BASE 9
374
372/* expander attached sata devices require 3 rnc slots */ 375/* expander attached sata devices require 3 rnc slots */
373static inline int sci_remote_device_node_count(struct isci_remote_device *idev) 376static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
374{ 377{
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 61e0d09e2b57..29aa34efb0f5 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -59,10 +59,19 @@
59#include <linux/firmware.h> 59#include <linux/firmware.h>
60#include <linux/efi.h> 60#include <linux/efi.h>
61#include <asm/string.h> 61#include <asm/string.h>
62#include <scsi/scsi_host.h>
62#include "isci.h" 63#include "isci.h"
63#include "task.h" 64#include "task.h"
64#include "probe_roms.h" 65#include "probe_roms.h"
65 66
67#define MAJ 1
68#define MIN 0
69#define BUILD 0
70#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
71 __stringify(BUILD)
72
73MODULE_VERSION(DRV_VERSION);
74
66static struct scsi_transport_template *isci_transport_template; 75static struct scsi_transport_template *isci_transport_template;
67 76
68static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = { 77static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
@@ -113,6 +122,22 @@ unsigned char max_concurr_spinup = 1;
113module_param(max_concurr_spinup, byte, 0); 122module_param(max_concurr_spinup, byte, 0);
114MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); 123MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
115 124
125static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
126{
127 struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
128 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
129 struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
130
131 return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
132}
133
134static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
135
136struct device_attribute *isci_host_attrs[] = {
137 &dev_attr_isci_id,
138 NULL
139};
140
116static struct scsi_host_template isci_sht = { 141static struct scsi_host_template isci_sht = {
117 142
118 .module = THIS_MODULE, 143 .module = THIS_MODULE,
@@ -138,6 +163,7 @@ static struct scsi_host_template isci_sht = {
138 .slave_alloc = sas_slave_alloc, 163 .slave_alloc = sas_slave_alloc,
139 .target_destroy = sas_target_destroy, 164 .target_destroy = sas_target_destroy,
140 .ioctl = sas_ioctl, 165 .ioctl = sas_ioctl,
166 .shost_attrs = isci_host_attrs,
141}; 167};
142 168
143static struct sas_domain_function_template isci_transport_ops = { 169static struct sas_domain_function_template isci_transport_ops = {
@@ -232,17 +258,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
232 return 0; 258 return 0;
233} 259}
234 260
235static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
236{
237 struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
238 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
239 struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
240
241 return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
242}
243
244static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
245
246static void isci_unregister(struct isci_host *isci_host) 261static void isci_unregister(struct isci_host *isci_host)
247{ 262{
248 struct Scsi_Host *shost; 263 struct Scsi_Host *shost;
@@ -251,7 +266,6 @@ static void isci_unregister(struct isci_host *isci_host)
251 return; 266 return;
252 267
253 shost = isci_host->shost; 268 shost = isci_host->shost;
254 device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
255 269
256 sas_unregister_ha(&isci_host->sas_ha); 270 sas_unregister_ha(&isci_host->sas_ha);
257 271
@@ -415,14 +429,8 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
415 if (err) 429 if (err)
416 goto err_shost_remove; 430 goto err_shost_remove;
417 431
418 err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
419 if (err)
420 goto err_unregister_ha;
421
422 return isci_host; 432 return isci_host;
423 433
424 err_unregister_ha:
425 sas_unregister_ha(&(isci_host->sas_ha));
426 err_shost_remove: 434 err_shost_remove:
427 scsi_remove_host(shost); 435 scsi_remove_host(shost);
428 err_shost: 436 err_shost:
@@ -540,7 +548,8 @@ static __init int isci_init(void)
540{ 548{
541 int err; 549 int err;
542 550
543 pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME); 551 pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n",
552 DRV_NAME, DRV_VERSION);
544 553
545 isci_transport_template = sas_domain_attach_transport(&isci_transport_ops); 554 isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
546 if (!isci_transport_template) 555 if (!isci_transport_template)
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 79313a7a2356..430fc8ff014a 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -104,6 +104,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
104 u32 parity_count = 0; 104 u32 parity_count = 0;
105 u32 llctl, link_rate; 105 u32 llctl, link_rate;
106 u32 clksm_value = 0; 106 u32 clksm_value = 0;
107 u32 sp_timeouts = 0;
107 108
108 iphy->link_layer_registers = reg; 109 iphy->link_layer_registers = reg;
109 110
@@ -211,6 +212,18 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
211 llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); 212 llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
212 writel(llctl, &iphy->link_layer_registers->link_layer_control); 213 writel(llctl, &iphy->link_layer_registers->link_layer_control);
213 214
215 sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
216
217 /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
218 sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
219
220 /* Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can
221 * lock with 3Gb drive when SCU max rate is set to 1.5Gb.
222 */
223 sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
224
225 writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
226
214 if (is_a2(ihost->pdev)) { 227 if (is_a2(ihost->pdev)) {
215 /* Program the max ARB time for the PHY to 700us so we inter-operate with 228 /* Program the max ARB time for the PHY to 700us so we inter-operate with
216 * the PMC expander which shuts down PHYs if the expander PHY generates too 229 * the PMC expander which shuts down PHYs if the expander PHY generates too
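The phy.c hunk above adjusts the RATE_CHANGE timeout with a classic read-modify-write: read the register, clear the field with its mask, OR in the new encoding, write it back, leaving the other timeout fields untouched. A minimal user-space sketch of that bitfield update; the field layout here is only illustrative of the SCU_SAS_PHYTOV_GEN_VAL() style, not a copy of the hardware definition:

#include <stdint.h>
#include <stdio.h>

#define RATE_CHANGE_SHIFT  24
#define RATE_CHANGE_MASK   (0xFFu << RATE_CHANGE_SHIFT)
#define RATE_CHANGE_VAL(v) (((uint32_t)(v) << RATE_CHANGE_SHIFT) & RATE_CHANGE_MASK)

int main(void)
{
    uint32_t sp_timeouts = 0x36u << RATE_CHANGE_SHIFT;  /* default 54us value */

    /* Clear the field first, then set the new encoding; the rest of the
     * register (the other timeout fields) is preserved. */
    sp_timeouts &= ~RATE_CHANGE_MASK;
    sp_timeouts |= RATE_CHANGE_VAL(0x3B);               /* 59us */

    printf("sas_phy_timeouts = 0x%08x\n", (unsigned)sp_timeouts);
    return 0;
}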
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 9b266c7428e8..00afc738bbed 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -1299,6 +1299,18 @@ struct scu_transport_layer_registers {
1299#define SCU_AFE_XCVRCR_OFFSET 0x00DC 1299#define SCU_AFE_XCVRCR_OFFSET 0x00DC
1300#define SCU_AFE_LUTCR_OFFSET 0x00E0 1300#define SCU_AFE_LUTCR_OFFSET 0x00E0
1301 1301
1302#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT (0UL)
1303#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK (0x000000FFUL)
1304#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT (8UL)
1305#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK (0x0000FF00UL)
1306#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT (16UL)
1307#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK (0x00FF0000UL)
1308#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT (24UL)
1309#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK (0xFF000000UL)
1310
1311#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \
1312 SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value)
1313
1302#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0) 1314#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0)
1303#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003) 1315#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003)
1304#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0) 1316#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index a46e07ac789f..b5d3a8c4d329 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -732,12 +732,20 @@ sci_io_request_terminate(struct isci_request *ireq)
732 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 732 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
733 return SCI_SUCCESS; 733 return SCI_SUCCESS;
734 case SCI_REQ_TASK_WAIT_TC_RESP: 734 case SCI_REQ_TASK_WAIT_TC_RESP:
735 /* The task frame was already confirmed to have been
736 * sent by the SCU HW. Since the state machine is
737 * now only waiting for the task response itself,
738 * abort the request and complete it immediately
739 * and don't wait for the task response.
740 */
735 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 741 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
736 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 742 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
737 return SCI_SUCCESS; 743 return SCI_SUCCESS;
738 case SCI_REQ_ABORTING: 744 case SCI_REQ_ABORTING:
739 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 745 /* If a request has a termination requested twice, return
740 return SCI_SUCCESS; 746 * a failure indication, since HW confirmation of the first
747 * abort is still outstanding.
748 */
741 case SCI_REQ_COMPLETED: 749 case SCI_REQ_COMPLETED:
742 default: 750 default:
743 dev_warn(&ireq->owning_controller->pdev->dev, 751 dev_warn(&ireq->owning_controller->pdev->dev,
@@ -2399,22 +2407,19 @@ static void isci_task_save_for_upper_layer_completion(
2399 } 2407 }
2400} 2408}
2401 2409
2402static void isci_request_process_stp_response(struct sas_task *task, 2410static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
2403 void *response_buffer)
2404{ 2411{
2405 struct dev_to_host_fis *d2h_reg_fis = response_buffer;
2406 struct task_status_struct *ts = &task->task_status; 2412 struct task_status_struct *ts = &task->task_status;
2407 struct ata_task_resp *resp = (void *)&ts->buf[0]; 2413 struct ata_task_resp *resp = (void *)&ts->buf[0];
2408 2414
2409 resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6)); 2415 resp->frame_len = sizeof(*fis);
2410 memcpy(&resp->ending_fis[0], response_buffer + 16, 24); 2416 memcpy(resp->ending_fis, fis, sizeof(*fis));
2411 ts->buf_valid_size = sizeof(*resp); 2417 ts->buf_valid_size = sizeof(*resp);
2412 2418
2413 /** 2419 /* If the device fault bit is set in the status register, then
2414 * If the device fault bit is set in the status register, then
2415 * set the sense data and return. 2420 * set the sense data and return.
2416 */ 2421 */
2417 if (d2h_reg_fis->status & ATA_DF) 2422 if (fis->status & ATA_DF)
2418 ts->stat = SAS_PROTO_RESPONSE; 2423 ts->stat = SAS_PROTO_RESPONSE;
2419 else 2424 else
2420 ts->stat = SAM_STAT_GOOD; 2425 ts->stat = SAM_STAT_GOOD;
@@ -2428,7 +2433,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
2428{ 2433{
2429 struct sas_task *task = isci_request_access_task(request); 2434 struct sas_task *task = isci_request_access_task(request);
2430 struct ssp_response_iu *resp_iu; 2435 struct ssp_response_iu *resp_iu;
2431 void *resp_buf;
2432 unsigned long task_flags; 2436 unsigned long task_flags;
2433 struct isci_remote_device *idev = isci_lookup_device(task->dev); 2437 struct isci_remote_device *idev = isci_lookup_device(task->dev);
2434 enum service_response response = SAS_TASK_UNDELIVERED; 2438 enum service_response response = SAS_TASK_UNDELIVERED;
@@ -2565,9 +2569,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
2565 task); 2569 task);
2566 2570
2567 if (sas_protocol_ata(task->task_proto)) { 2571 if (sas_protocol_ata(task->task_proto)) {
2568 resp_buf = &request->stp.rsp; 2572 isci_process_stp_response(task, &request->stp.rsp);
2569 isci_request_process_stp_response(task,
2570 resp_buf);
2571 } else if (SAS_PROTOCOL_SSP == task->task_proto) { 2573 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2572 2574
2573 /* crack the iu response buffer. */ 2575 /* crack the iu response buffer. */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
index e9e1e2abacb9..16f88ab939c8 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.c
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -72,7 +72,7 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
72 */ 72 */
73 buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; 73 buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
74 header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); 74 header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
75 size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t); 75 size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
76 76
77 /* 77 /*
78 * The Unsolicited Frame buffers are set at the start of the UF 78 * The Unsolicited Frame buffers are set at the start of the UF
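The one-line unsolicited_frame_control.c fix above sizes the address table from sizeof(array[0]) instead of a hard-coded sizeof(dma_addr_t), so the allocation automatically tracks the element type when the declaration changes (as it does in the header hunk below). A tiny user-space illustration of the idiom:

#include <stdint.h>
#include <stdio.h>

struct table {
    uint64_t *array;   /* was dma_addr_t * before the companion header change */
};

int main(void)
{
    struct table t;
    unsigned int nframes = 128;

    /* sizeof(t.array[0]) follows the element type automatically; a literal
     * sizeof(some_type) goes stale when the declaration changes, which is the
     * mismatch this kind of fix closes.  sizeof does not evaluate its
     * operand, so the uninitialized pointer is never dereferenced. */
    size_t bytes = nframes * sizeof(t.array[0]);

    printf("address table needs %zu bytes\n", bytes);
    return 0;
}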
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
index 31cb9506f52d..75d896686f5a 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.h
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -214,7 +214,7 @@ struct sci_uf_address_table_array {
214 * starting address of the UF address table. 214 * starting address of the UF address table.
215 * 64-bit pointers are required by the hardware. 215 * 64-bit pointers are required by the hardware.
216 */ 216 */
217 dma_addr_t *array; 217 u64 *array;
218 218
219 /** 219 /**
220 * This field specifies the physical address location for the UF 220 * This field specifies the physical address location for the UF
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 01ff082dc34c..d261e982a2fa 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -494,6 +494,9 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
494 */ 494 */
495 error = lport->tt.frame_send(lport, fp); 495 error = lport->tt.frame_send(lport, fp);
496 496
497 if (fh->fh_type == FC_TYPE_BLS)
498 return error;
499
497 /* 500 /*
498 * Update the exchange and sequence flags, 501 * Update the exchange and sequence flags,
499 * assuming all frames for the sequence have been sent. 502 * assuming all frames for the sequence have been sent.
@@ -575,42 +578,35 @@ static void fc_seq_set_resp(struct fc_seq *sp,
575} 578}
576 579
577/** 580/**
578 * fc_seq_exch_abort() - Abort an exchange and sequence 581 * fc_exch_abort_locked() - Abort an exchange
579 * @req_sp: The sequence to be aborted 582 * @ep: The exchange to be aborted
580 * @timer_msec: The period of time to wait before aborting 583 * @timer_msec: The period of time to wait before aborting
581 * 584 *
582 * Generally called because of a timeout or an abort from the upper layer. 585 * Locking notes: Called with exch lock held
586 *
587 * Return value: 0 on success else error code
583 */ 588 */
584static int fc_seq_exch_abort(const struct fc_seq *req_sp, 589static int fc_exch_abort_locked(struct fc_exch *ep,
585 unsigned int timer_msec) 590 unsigned int timer_msec)
586{ 591{
587 struct fc_seq *sp; 592 struct fc_seq *sp;
588 struct fc_exch *ep;
589 struct fc_frame *fp; 593 struct fc_frame *fp;
590 int error; 594 int error;
591 595
592 ep = fc_seq_exch(req_sp);
593
594 spin_lock_bh(&ep->ex_lock);
595 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) || 596 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
596 ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) { 597 ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
597 spin_unlock_bh(&ep->ex_lock);
598 return -ENXIO; 598 return -ENXIO;
599 }
600 599
601 /* 600 /*
602 * Send the abort on a new sequence if possible. 601 * Send the abort on a new sequence if possible.
603 */ 602 */
604 sp = fc_seq_start_next_locked(&ep->seq); 603 sp = fc_seq_start_next_locked(&ep->seq);
605 if (!sp) { 604 if (!sp)
606 spin_unlock_bh(&ep->ex_lock);
607 return -ENOMEM; 605 return -ENOMEM;
608 }
609 606
610 ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL; 607 ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
611 if (timer_msec) 608 if (timer_msec)
612 fc_exch_timer_set_locked(ep, timer_msec); 609 fc_exch_timer_set_locked(ep, timer_msec);
613 spin_unlock_bh(&ep->ex_lock);
614 610
615 /* 611 /*
616 * If not logged into the fabric, don't send ABTS but leave 612 * If not logged into the fabric, don't send ABTS but leave
@@ -633,6 +629,28 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp,
633} 629}
634 630
635/** 631/**
632 * fc_seq_exch_abort() - Abort an exchange and sequence
633 * @req_sp: The sequence to be aborted
634 * @timer_msec: The period of time to wait before aborting
635 *
636 * Generally called because of a timeout or an abort from the upper layer.
637 *
638 * Return value: 0 on success else error code
639 */
640static int fc_seq_exch_abort(const struct fc_seq *req_sp,
641 unsigned int timer_msec)
642{
643 struct fc_exch *ep;
644 int error;
645
646 ep = fc_seq_exch(req_sp);
647 spin_lock_bh(&ep->ex_lock);
648 error = fc_exch_abort_locked(ep, timer_msec);
649 spin_unlock_bh(&ep->ex_lock);
650 return error;
651}
652
653/**
636 * fc_exch_timeout() - Handle exchange timer expiration 654 * fc_exch_timeout() - Handle exchange timer expiration
637 * @work: The work_struct identifying the exchange that timed out 655 * @work: The work_struct identifying the exchange that timed out
638 */ 656 */
@@ -1715,6 +1733,7 @@ static void fc_exch_reset(struct fc_exch *ep)
1715 int rc = 1; 1733 int rc = 1;
1716 1734
1717 spin_lock_bh(&ep->ex_lock); 1735 spin_lock_bh(&ep->ex_lock);
1736 fc_exch_abort_locked(ep, 0);
1718 ep->state |= FC_EX_RST_CLEANUP; 1737 ep->state |= FC_EX_RST_CLEANUP;
1719 if (cancel_delayed_work(&ep->timeout_work)) 1738 if (cancel_delayed_work(&ep->timeout_work))
1720 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ 1739 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
@@ -1962,6 +1981,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
1962 struct fc_exch *ep; 1981 struct fc_exch *ep;
1963 struct fc_seq *sp = NULL; 1982 struct fc_seq *sp = NULL;
1964 struct fc_frame_header *fh; 1983 struct fc_frame_header *fh;
1984 struct fc_fcp_pkt *fsp = NULL;
1965 int rc = 1; 1985 int rc = 1;
1966 1986
1967 ep = fc_exch_alloc(lport, fp); 1987 ep = fc_exch_alloc(lport, fp);
@@ -1984,8 +2004,10 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
1984 fc_exch_setup_hdr(ep, fp, ep->f_ctl); 2004 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1985 sp->cnt++; 2005 sp->cnt++;
1986 2006
1987 if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) 2007 if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
2008 fsp = fr_fsp(fp);
1988 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); 2009 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
2010 }
1989 2011
1990 if (unlikely(lport->tt.frame_send(lport, fp))) 2012 if (unlikely(lport->tt.frame_send(lport, fp)))
1991 goto err; 2013 goto err;
@@ -1999,7 +2021,8 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
1999 spin_unlock_bh(&ep->ex_lock); 2021 spin_unlock_bh(&ep->ex_lock);
2000 return sp; 2022 return sp;
2001err: 2023err:
2002 fc_fcp_ddp_done(fr_fsp(fp)); 2024 if (fsp)
2025 fc_fcp_ddp_done(fsp);
2003 rc = fc_exch_done_locked(ep); 2026 rc = fc_exch_done_locked(ep);
2004 spin_unlock_bh(&ep->ex_lock); 2027 spin_unlock_bh(&ep->ex_lock);
2005 if (!rc) 2028 if (!rc)
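The fc_exch.c refactor above splits the abort into a _locked helper that assumes the caller already holds the exchange lock, plus a thin public wrapper that takes and releases the lock, so fc_exch_reset() can reuse the helper under its own lock without deadlocking. A user-space sketch of that locked/unlocked pairing, with a pthread mutex standing in for ex_lock:

#include <pthread.h>
#include <stdio.h>

struct exch {
    pthread_mutex_t lock;
    int aborted;
};

static int exch_abort_locked(struct exch *ep)   /* caller holds ep->lock */
{
    if (ep->aborted)
        return -1;                              /* already being torn down */
    ep->aborted = 1;
    puts("abort sent");
    return 0;
}

static int exch_abort(struct exch *ep)          /* public wrapper: lock + helper */
{
    int error;

    pthread_mutex_lock(&ep->lock);
    error = exch_abort_locked(ep);
    pthread_mutex_unlock(&ep->lock);
    return error;
}

static void exch_reset(struct exch *ep)         /* already needs the lock itself */
{
    pthread_mutex_lock(&ep->lock);
    exch_abort_locked(ep);                      /* safe: no double locking */
    /* ... rest of the reset work ... */
    pthread_mutex_unlock(&ep->lock);
}

int main(void)
{
    struct exch ep = { PTHREAD_MUTEX_INITIALIZER, 0 };

    exch_reset(&ep);
    exch_abort(&ep);                            /* second abort is rejected */
    return 0;
}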
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index afb63c843144..4c41ee816f0b 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -2019,6 +2019,11 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
2019 struct fc_fcp_internal *si; 2019 struct fc_fcp_internal *si;
2020 int rc = FAILED; 2020 int rc = FAILED;
2021 unsigned long flags; 2021 unsigned long flags;
2022 int rval;
2023
2024 rval = fc_block_scsi_eh(sc_cmd);
2025 if (rval)
2026 return rval;
2022 2027
2023 lport = shost_priv(sc_cmd->device->host); 2028 lport = shost_priv(sc_cmd->device->host);
2024 if (lport->state != LPORT_ST_READY) 2029 if (lport->state != LPORT_ST_READY)
@@ -2068,9 +2073,9 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
2068 int rc = FAILED; 2073 int rc = FAILED;
2069 int rval; 2074 int rval;
2070 2075
2071 rval = fc_remote_port_chkready(rport); 2076 rval = fc_block_scsi_eh(sc_cmd);
2072 if (rval) 2077 if (rval)
2073 goto out; 2078 return rval;
2074 2079
2075 lport = shost_priv(sc_cmd->device->host); 2080 lport = shost_priv(sc_cmd->device->host);
2076 2081
@@ -2116,6 +2121,8 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
2116 2121
2117 FC_SCSI_DBG(lport, "Resetting host\n"); 2122 FC_SCSI_DBG(lport, "Resetting host\n");
2118 2123
2124 fc_block_scsi_eh(sc_cmd);
2125
2119 lport->tt.lport_reset(lport); 2126 lport->tt.lport_reset(lport);
2120 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT; 2127 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
2121 while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies, 2128 while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e55ed9cf23fb..628f347404f9 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -88,6 +88,7 @@
88 */ 88 */
89 89
90#include <linux/timer.h> 90#include <linux/timer.h>
91#include <linux/delay.h>
91#include <linux/slab.h> 92#include <linux/slab.h>
92#include <asm/unaligned.h> 93#include <asm/unaligned.h>
93 94
@@ -1029,8 +1030,16 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
1029 FCH_EVT_LIPRESET, 0); 1030 FCH_EVT_LIPRESET, 0);
1030 fc_vports_linkchange(lport); 1031 fc_vports_linkchange(lport);
1031 fc_lport_reset_locked(lport); 1032 fc_lport_reset_locked(lport);
1032 if (lport->link_up) 1033 if (lport->link_up) {
1034 /*
1035 * Wait up to the resource allocation timeout before
1036 * doing re-login, since incomplete FIP exchanges
1037 * from the last session may collide with exchanges
1038 * in the new session.
1039 */
1040 msleep(lport->r_a_tov);
1033 fc_lport_enter_flogi(lport); 1041 fc_lport_enter_flogi(lport);
1042 }
1034} 1043}
1035 1044
1036/** 1045/**
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index f84084bba2f0..c9e3dc024bc3 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1721,7 +1721,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
1721 list_for_each_entry(ch, &ex->children, siblings) { 1721 list_for_each_entry(ch, &ex->children, siblings) {
1722 if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { 1722 if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
1723 res = sas_find_bcast_dev(ch, src_dev); 1723 res = sas_find_bcast_dev(ch, src_dev);
1724 if (src_dev) 1724 if (*src_dev)
1725 return res; 1725 return res;
1726 } 1726 }
1727 } 1727 }
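The sas_find_bcast_dev() fix is a one-character pointer bug: the old code tested the out-parameter itself, which is never NULL, instead of the device it points at. A small standalone reproduction of the same mistake, with invented names:

#include <stdio.h>
#include <stddef.h>

struct device { int id; };

/* Callee stores a match through the out-parameter, or leaves it alone. */
static void probe_child(int key, struct device **res)
{
        static struct device dev = { 42 };

        if (key == 42)
                *res = &dev;
}

static int search_buggy(int key, struct device **res)
{
        probe_child(key, res);
        return res != NULL;      /* BUG: tests the pointer argument, always true */
}

static int search_fixed(int key, struct device **res)
{
        probe_child(key, res);
        return *res != NULL;     /* FIX: tests what the callee actually stored */
}

int main(void)
{
        struct device *found = NULL;

        printf("buggy lookup for key 7: %d (false positive)\n", search_buggy(7, &found));
        found = NULL;
        printf("fixed lookup for key 7: %d\n", search_fixed(7, &found));
        return 0;
}
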
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 7836eb01c7fc..a31e05f3bfd4 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1786,13 +1786,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1786 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1786 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1787 } 1787 }
1788 1788
1789 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 1789 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
1790 if (ha->fw_attributes & BIT_4) { 1790 if (ha->fw_attributes & BIT_4) {
1791 int prot = 0;
1791 vha->flags.difdix_supported = 1; 1792 vha->flags.difdix_supported = 1;
1792 ql_dbg(ql_dbg_user, vha, 0x7082, 1793 ql_dbg(ql_dbg_user, vha, 0x7082,
1793 "Registered for DIF/DIX type 1 and 3 protection.\n"); 1794 "Registered for DIF/DIX type 1 and 3 protection.\n");
1795 if (ql2xenabledif == 1)
1796 prot = SHOST_DIX_TYPE0_PROTECTION;
1794 scsi_host_set_prot(vha->host, 1797 scsi_host_set_prot(vha->host,
1795 SHOST_DIF_TYPE1_PROTECTION 1798 prot | SHOST_DIF_TYPE1_PROTECTION
1796 | SHOST_DIF_TYPE2_PROTECTION 1799 | SHOST_DIF_TYPE2_PROTECTION
1797 | SHOST_DIF_TYPE3_PROTECTION 1800 | SHOST_DIF_TYPE3_PROTECTION
1798 | SHOST_DIX_TYPE1_PROTECTION 1801 | SHOST_DIX_TYPE1_PROTECTION
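With ql2xenabledif == 1 the vport now also advertises DIX Type 0 protection alongside the DIF/DIX types it already registered. A compilable sketch of how that mask comes together (the flag values below are local stand-ins, not the kernel's SHOST_* constants, and the trailing DIX types from the original list are elided):

#include <stdio.h>

#define DIF_TYPE1   (1u << 0)     /* local stand-ins for SHOST_DIF/DIX_* flags */
#define DIF_TYPE2   (1u << 1)
#define DIF_TYPE3   (1u << 2)
#define DIX_TYPE0   (1u << 3)
#define DIX_TYPE1   (1u << 4)

static unsigned int qla_prot_mask(int ql2xenabledif)
{
        unsigned int prot = 0;

        if (ql2xenabledif == 1)          /* mode 1 additionally offers DIX Type 0 */
                prot = DIX_TYPE0;

        /* remaining DIX types from the original list elided for brevity */
        return prot | DIF_TYPE1 | DIF_TYPE2 | DIF_TYPE3 | DIX_TYPE1;
}

int main(void)
{
        printf("ql2xenabledif=1 -> prot mask 0x%x\n", qla_prot_mask(1));
        printf("ql2xenabledif=2 -> prot mask 0x%x\n", qla_prot_mask(2));
        return 0;
}
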
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 2155071f3100..d79cd8a5f831 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -8,24 +8,24 @@
8/* 8/*
9 * Table for showing the current message id in use for particular level 9 * Table for showing the current message id in use for particular level
10 * Change this table for addition of log/debug messages. 10 * Change this table for addition of log/debug messages.
11 * ----------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | 12 * | Level | Last Value Used | Holes |
13 * ----------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0116 | 14 * | Module Init and Probe | 0x0116 | |
15 * | Mailbox commands | 0x111e | 15 * | Mailbox commands | 0x1126 | |
16 * | Device Discovery | 0x2083 | 16 * | Device Discovery | 0x2083 | |
17 * | Queue Command and IO tracing | 0x302e | 17 * | Queue Command and IO tracing | 0x302e | 0x3008 |
18 * | DPC Thread | 0x401c | 18 * | DPC Thread | 0x401c | |
19 * | Async Events | 0x5059 | 19 * | Async Events | 0x5059 | |
20 * | Timer Routines | 0x600d | 20 * | Timer Routines | 0x600d | |
21 * | User Space Interactions | 0x709c | 21 * | User Space Interactions | 0x709d | |
22 * | Task Management | 0x8043 | 22 * | Task Management | 0x8041 | |
23 * | AER/EEH | 0x900f | 23 * | AER/EEH | 0x900f | |
24 * | Virtual Port | 0xa007 | 24 * | Virtual Port | 0xa007 | |
25 * | ISP82XX Specific | 0xb027 | 25 * | ISP82XX Specific | 0xb04f | |
26 * | MultiQ | 0xc00b | 26 * | MultiQ | 0xc00b | |
27 * | Misc | 0xd00b | 27 * | Misc | 0xd00b | |
28 * ----------------------------------------------------- 28 * ----------------------------------------------------------------------
29 */ 29 */
30 30
31#include "qla_def.h" 31#include "qla_def.h"
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index cc5a79259d33..a03eaf40f377 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2529,6 +2529,7 @@ struct qla_hw_data {
2529#define DT_ISP8021 BIT_14 2529#define DT_ISP8021 BIT_14
2530#define DT_ISP_LAST (DT_ISP8021 << 1) 2530#define DT_ISP_LAST (DT_ISP8021 << 1)
2531 2531
2532#define DT_T10_PI BIT_25
2532#define DT_IIDMA BIT_26 2533#define DT_IIDMA BIT_26
2533#define DT_FWI2 BIT_27 2534#define DT_FWI2 BIT_27
2534#define DT_ZIO_SUPPORTED BIT_28 2535#define DT_ZIO_SUPPORTED BIT_28
@@ -2572,6 +2573,7 @@ struct qla_hw_data {
2572#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha)) 2573#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha))
2573#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) 2574#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
2574 2575
2576#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
2575#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) 2577#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
2576#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) 2578#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
2577#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED) 2579#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 691783abfb69..aa69486dc064 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -537,6 +537,11 @@ struct sts_entry_24xx {
537 /* 537 /*
538 * If DIF Error is set in comp_status, these additional fields are 538 * If DIF Error is set in comp_status, these additional fields are
539 * defined: 539 * defined:
540 *
541 * !!! NOTE: Firmware sends expected/actual DIF data in big endian
542 * format; but all of the "data" field gets swab32-d in the beginning
543 * of qla2x00_status_entry().
544 *
540 * &data[10] : uint8_t report_runt_bg[2]; - computed guard 545 * &data[10] : uint8_t report_runt_bg[2]; - computed guard
541 * &data[12] : uint8_t actual_dif[8]; - DIF Data received 546 * &data[12] : uint8_t actual_dif[8]; - DIF Data received
542 * &data[20] : uint8_t expected_dif[8]; - DIF Data computed 547 * &data[20] : uint8_t expected_dif[8]; - DIF Data computed
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index def694271bf7..37da04d3db26 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3838,15 +3838,12 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3838 req = vha->req; 3838 req = vha->req;
3839 rsp = req->rsp; 3839 rsp = req->rsp;
3840 3840
3841 atomic_set(&vha->loop_state, LOOP_UPDATE);
3842 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3841 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3843 if (vha->flags.online) { 3842 if (vha->flags.online) {
3844 if (!(rval = qla2x00_fw_ready(vha))) { 3843 if (!(rval = qla2x00_fw_ready(vha))) {
3845 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3844 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3846 wait_time = 256; 3845 wait_time = 256;
3847 do { 3846 do {
3848 atomic_set(&vha->loop_state, LOOP_UPDATE);
3849
3850 /* Issue a marker after FW becomes ready. */ 3847 /* Issue a marker after FW becomes ready. */
3851 qla2x00_marker(vha, req, rsp, 0, 0, 3848 qla2x00_marker(vha, req, rsp, 0, 0,
3852 MK_SYNC_ALL); 3849 MK_SYNC_ALL);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index d2e904bc21c0..9902834e0b74 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -102,3 +102,32 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
102 fcport->d_id.b.al_pa); 102 fcport->d_id.b.al_pa);
103 } 103 }
104} 104}
105
106static inline int
107qla2x00_hba_err_chk_enabled(srb_t *sp)
108{
109 /*
110 * Uncomment when corresponding SCSI changes are done.
111 *
112 if (!sp->cmd->prot_chk)
113 return 0;
114 *
115 */
116
117 switch (scsi_get_prot_op(sp->cmd)) {
118 case SCSI_PROT_READ_STRIP:
119 case SCSI_PROT_WRITE_INSERT:
120 if (ql2xenablehba_err_chk >= 1)
121 return 1;
122 break;
123 case SCSI_PROT_READ_PASS:
124 case SCSI_PROT_WRITE_PASS:
125 if (ql2xenablehba_err_chk >= 2)
126 return 1;
127 break;
128 case SCSI_PROT_READ_INSERT:
129 case SCSI_PROT_WRITE_STRIP:
130 return 1;
131 }
132 return 0;
133}
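The new qla2x00_hba_err_chk_enabled() gates HBA-side error isolation on both the protection operation and the ql2xenablehba_err_chk level. The same decision table as a standalone program (the enum is a stand-in for the kernel's SCSI_PROT_* operation codes, not the real values):

#include <stdio.h>

/* Stand-in for the kernel's SCSI_PROT_* protection operations. */
enum prot_op { READ_INSERT, WRITE_STRIP, READ_STRIP, WRITE_INSERT,
               READ_PASS, WRITE_PASS };

static const char *op_name[] = { "READ_INSERT", "WRITE_STRIP", "READ_STRIP",
                                 "WRITE_INSERT", "READ_PASS", "WRITE_PASS" };

/* Mirrors the switch in qla2x00_hba_err_chk_enabled(). */
static int hba_err_chk(enum prot_op op, int level)
{
        switch (op) {
        case READ_INSERT:
        case WRITE_STRIP:
                return 1;                /* always checked */
        case READ_STRIP:
        case WRITE_INSERT:
                return level >= 1;
        case READ_PASS:
        case WRITE_PASS:
                return level >= 2;
        }
        return 0;
}

int main(void)
{
        for (int level = 0; level <= 2; level++)
                for (int op = READ_INSERT; op <= WRITE_PASS; op++)
                        printf("level %d  %-12s -> %s\n", level, op_name[op],
                               hba_err_chk((enum prot_op)op, level) ? "check" : "skip");
        return 0;
}
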
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 49d6906af886..dbec89622a0f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -709,20 +709,28 @@ struct fw_dif_context {
709 * 709 *
710 */ 710 */
711static inline void 711static inline void
712qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, 712qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
713 unsigned int protcnt) 713 unsigned int protcnt)
714{ 714{
715 struct sd_dif_tuple *spt; 715 struct scsi_cmnd *cmd = sp->cmd;
716 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 716 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
717 unsigned char op = scsi_get_prot_op(cmd);
718 717
719 switch (scsi_get_prot_type(cmd)) { 718 switch (scsi_get_prot_type(cmd)) {
720 /* For TYPE 0 protection: no checking */
721 case SCSI_PROT_DIF_TYPE0: 719 case SCSI_PROT_DIF_TYPE0:
722 pkt->ref_tag_mask[0] = 0x00; 720 /*
723 pkt->ref_tag_mask[1] = 0x00; 721 * No check for ql2xenablehba_err_chk, as it would be an
724 pkt->ref_tag_mask[2] = 0x00; 722 * I/O error if hba tag generation is not done.
725 pkt->ref_tag_mask[3] = 0x00; 723 */
724 pkt->ref_tag = cpu_to_le32((uint32_t)
725 (0xffffffff & scsi_get_lba(cmd)));
726
727 if (!qla2x00_hba_err_chk_enabled(sp))
728 break;
729
730 pkt->ref_tag_mask[0] = 0xff;
731 pkt->ref_tag_mask[1] = 0xff;
732 pkt->ref_tag_mask[2] = 0xff;
733 pkt->ref_tag_mask[3] = 0xff;
726 break; 734 break;
727 735
728 /* 736 /*
@@ -730,20 +738,16 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
730 * match LBA in CDB + N 738 * match LBA in CDB + N
731 */ 739 */
732 case SCSI_PROT_DIF_TYPE2: 740 case SCSI_PROT_DIF_TYPE2:
733 if (!ql2xenablehba_err_chk) 741 pkt->app_tag = __constant_cpu_to_le16(0);
734 break; 742 pkt->app_tag_mask[0] = 0x0;
735 743 pkt->app_tag_mask[1] = 0x0;
736 if (scsi_prot_sg_count(cmd)) {
737 spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
738 scsi_prot_sglist(cmd)[0].offset;
739 pkt->app_tag = swab32(spt->app_tag);
740 pkt->app_tag_mask[0] = 0xff;
741 pkt->app_tag_mask[1] = 0xff;
742 }
743 744
744 pkt->ref_tag = cpu_to_le32((uint32_t) 745 pkt->ref_tag = cpu_to_le32((uint32_t)
745 (0xffffffff & scsi_get_lba(cmd))); 746 (0xffffffff & scsi_get_lba(cmd)));
746 747
748 if (!qla2x00_hba_err_chk_enabled(sp))
749 break;
750
747 /* enable ALL bytes of the ref tag */ 751 /* enable ALL bytes of the ref tag */
748 pkt->ref_tag_mask[0] = 0xff; 752 pkt->ref_tag_mask[0] = 0xff;
749 pkt->ref_tag_mask[1] = 0xff; 753 pkt->ref_tag_mask[1] = 0xff;
@@ -763,26 +767,15 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
763 * 16 bit app tag. 767 * 16 bit app tag.
764 */ 768 */
765 case SCSI_PROT_DIF_TYPE1: 769 case SCSI_PROT_DIF_TYPE1:
766 if (!ql2xenablehba_err_chk) 770 pkt->ref_tag = cpu_to_le32((uint32_t)
771 (0xffffffff & scsi_get_lba(cmd)));
772 pkt->app_tag = __constant_cpu_to_le16(0);
773 pkt->app_tag_mask[0] = 0x0;
774 pkt->app_tag_mask[1] = 0x0;
775
776 if (!qla2x00_hba_err_chk_enabled(sp))
767 break; 777 break;
768 778
769 if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
770 op == SCSI_PROT_WRITE_PASS)) {
771 spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
772 scsi_prot_sglist(cmd)[0].offset;
773 ql_dbg(ql_dbg_io, vha, 0x3008,
774 "LBA from user %p, lba = 0x%x for cmd=%p.\n",
775 spt, (int)spt->ref_tag, cmd);
776 pkt->ref_tag = swab32(spt->ref_tag);
777 pkt->app_tag_mask[0] = 0x0;
778 pkt->app_tag_mask[1] = 0x0;
779 } else {
780 pkt->ref_tag = cpu_to_le32((uint32_t)
781 (0xffffffff & scsi_get_lba(cmd)));
782 pkt->app_tag = __constant_cpu_to_le16(0);
783 pkt->app_tag_mask[0] = 0x0;
784 pkt->app_tag_mask[1] = 0x0;
785 }
786 /* enable ALL bytes of the ref tag */ 779 /* enable ALL bytes of the ref tag */
787 pkt->ref_tag_mask[0] = 0xff; 780 pkt->ref_tag_mask[0] = 0xff;
788 pkt->ref_tag_mask[1] = 0xff; 781 pkt->ref_tag_mask[1] = 0xff;
@@ -798,8 +791,162 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
798 scsi_get_prot_type(cmd), cmd); 791 scsi_get_prot_type(cmd), cmd);
799} 792}
800 793
794struct qla2_sgx {
795 dma_addr_t dma_addr; /* OUT */
796 uint32_t dma_len; /* OUT */
797
798 uint32_t tot_bytes; /* IN */
799 struct scatterlist *cur_sg; /* IN */
800
801 /* for book keeping, bzero on initial invocation */
802 uint32_t bytes_consumed;
803 uint32_t num_bytes;
804 uint32_t tot_partial;
805
806 /* for debugging */
807 uint32_t num_sg;
808 srb_t *sp;
809};
801 810
802static int 811static int
812qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
813 uint32_t *partial)
814{
815 struct scatterlist *sg;
816 uint32_t cumulative_partial, sg_len;
817 dma_addr_t sg_dma_addr;
818
819 if (sgx->num_bytes == sgx->tot_bytes)
820 return 0;
821
822 sg = sgx->cur_sg;
823 cumulative_partial = sgx->tot_partial;
824
825 sg_dma_addr = sg_dma_address(sg);
826 sg_len = sg_dma_len(sg);
827
828 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
829
830 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
831 sgx->dma_len = (blk_sz - cumulative_partial);
832 sgx->tot_partial = 0;
833 sgx->num_bytes += blk_sz;
834 *partial = 0;
835 } else {
836 sgx->dma_len = sg_len - sgx->bytes_consumed;
837 sgx->tot_partial += sgx->dma_len;
838 *partial = 1;
839 }
840
841 sgx->bytes_consumed += sgx->dma_len;
842
843 if (sg_len == sgx->bytes_consumed) {
844 sg = sg_next(sg);
845 sgx->num_sg++;
846 sgx->cur_sg = sg;
847 sgx->bytes_consumed = 0;
848 }
849
850 return 1;
851}
852
853static int
854qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
855 uint32_t *dsd, uint16_t tot_dsds)
856{
857 void *next_dsd;
858 uint8_t avail_dsds = 0;
859 uint32_t dsd_list_len;
860 struct dsd_dma *dsd_ptr;
861 struct scatterlist *sg_prot;
862 uint32_t *cur_dsd = dsd;
863 uint16_t used_dsds = tot_dsds;
864
865 uint32_t prot_int;
866 uint32_t partial;
867 struct qla2_sgx sgx;
868 dma_addr_t sle_dma;
869 uint32_t sle_dma_len, tot_prot_dma_len = 0;
870 struct scsi_cmnd *cmd = sp->cmd;
871
872 prot_int = cmd->device->sector_size;
873
874 memset(&sgx, 0, sizeof(struct qla2_sgx));
875 sgx.tot_bytes = scsi_bufflen(sp->cmd);
876 sgx.cur_sg = scsi_sglist(sp->cmd);
877 sgx.sp = sp;
878
879 sg_prot = scsi_prot_sglist(sp->cmd);
880
881 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
882
883 sle_dma = sgx.dma_addr;
884 sle_dma_len = sgx.dma_len;
885alloc_and_fill:
886 /* Allocate additional continuation packets? */
887 if (avail_dsds == 0) {
888 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
889 QLA_DSDS_PER_IOCB : used_dsds;
890 dsd_list_len = (avail_dsds + 1) * 12;
891 used_dsds -= avail_dsds;
892
893 /* allocate tracking DS */
894 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
895 if (!dsd_ptr)
896 return 1;
897
898 /* allocate new list */
899 dsd_ptr->dsd_addr = next_dsd =
900 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
901 &dsd_ptr->dsd_list_dma);
902
903 if (!next_dsd) {
904 /*
905 * Need to cleanup only this dsd_ptr, rest
906 * will be done by sp_free_dma()
907 */
908 kfree(dsd_ptr);
909 return 1;
910 }
911
912 list_add_tail(&dsd_ptr->list,
913 &((struct crc_context *)sp->ctx)->dsd_list);
914
915 sp->flags |= SRB_CRC_CTX_DSD_VALID;
916
917 /* add new list to cmd iocb or last list */
918 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
919 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
920 *cur_dsd++ = dsd_list_len;
921 cur_dsd = (uint32_t *)next_dsd;
922 }
923 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
924 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
925 *cur_dsd++ = cpu_to_le32(sle_dma_len);
926 avail_dsds--;
927
928 if (partial == 0) {
929 /* Got a full protection interval */
930 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
931 sle_dma_len = 8;
932
933 tot_prot_dma_len += sle_dma_len;
934 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
935 tot_prot_dma_len = 0;
936 sg_prot = sg_next(sg_prot);
937 }
938
939 partial = 1; /* So as to not re-enter this block */
940 goto alloc_and_fill;
941 }
942 }
943 /* Null termination */
944 *cur_dsd++ = 0;
945 *cur_dsd++ = 0;
946 *cur_dsd++ = 0;
947 return 0;
948}
949static int
803qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, 950qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
804 uint16_t tot_dsds) 951 uint16_t tot_dsds)
805{ 952{
@@ -981,7 +1128,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
981 struct scsi_cmnd *cmd; 1128 struct scsi_cmnd *cmd;
982 struct scatterlist *cur_seg; 1129 struct scatterlist *cur_seg;
983 int sgc; 1130 int sgc;
984 uint32_t total_bytes; 1131 uint32_t total_bytes = 0;
985 uint32_t data_bytes; 1132 uint32_t data_bytes;
986 uint32_t dif_bytes; 1133 uint32_t dif_bytes;
987 uint8_t bundling = 1; 1134 uint8_t bundling = 1;
@@ -1023,8 +1170,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1023 __constant_cpu_to_le16(CF_READ_DATA); 1170 __constant_cpu_to_le16(CF_READ_DATA);
1024 } 1171 }
1025 1172
1026 tot_prot_dsds = scsi_prot_sg_count(cmd); 1173 if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
1027 if (!tot_prot_dsds) 1174 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
1175 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
1176 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
1028 bundling = 0; 1177 bundling = 0;
1029 1178
1030 /* Allocate CRC context from global pool */ 1179 /* Allocate CRC context from global pool */
@@ -1047,7 +1196,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1047 1196
1048 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 1197 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1049 1198
1050 qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *) 1199 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1051 &crc_ctx_pkt->ref_tag, tot_prot_dsds); 1200 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1052 1201
1053 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); 1202 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
@@ -1076,7 +1225,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1076 fcp_cmnd->additional_cdb_len |= 2; 1225 fcp_cmnd->additional_cdb_len |= 2;
1077 1226
1078 int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); 1227 int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
1079 host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
1080 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 1228 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1081 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); 1229 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1082 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( 1230 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
@@ -1107,15 +1255,28 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1107 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1255 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1108 1256
1109 /* Compute dif len and adjust data len to incude protection */ 1257 /* Compute dif len and adjust data len to incude protection */
1110 total_bytes = data_bytes;
1111 dif_bytes = 0; 1258 dif_bytes = 0;
1112 blk_size = cmd->device->sector_size; 1259 blk_size = cmd->device->sector_size;
1113 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 1260 dif_bytes = (data_bytes / blk_size) * 8;
1114 dif_bytes = (data_bytes / blk_size) * 8; 1261
1115 total_bytes += dif_bytes; 1262 switch (scsi_get_prot_op(sp->cmd)) {
1263 case SCSI_PROT_READ_INSERT:
1264 case SCSI_PROT_WRITE_STRIP:
1265 total_bytes = data_bytes;
1266 data_bytes += dif_bytes;
1267 break;
1268
1269 case SCSI_PROT_READ_STRIP:
1270 case SCSI_PROT_WRITE_INSERT:
1271 case SCSI_PROT_READ_PASS:
1272 case SCSI_PROT_WRITE_PASS:
1273 total_bytes = data_bytes + dif_bytes;
1274 break;
1275 default:
1276 BUG();
1116 } 1277 }
1117 1278
1118 if (!ql2xenablehba_err_chk) 1279 if (!qla2x00_hba_err_chk_enabled(sp))
1119 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 1280 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1120 1281
1121 if (!bundling) { 1282 if (!bundling) {
@@ -1151,7 +1312,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1151 1312
1152 cmd_pkt->control_flags |= 1313 cmd_pkt->control_flags |=
1153 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); 1314 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1154 if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, 1315
1316 if (!bundling && tot_prot_dsds) {
1317 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1318 cur_dsd, tot_dsds))
1319 goto crc_queuing_error;
1320 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1155 (tot_dsds - tot_prot_dsds))) 1321 (tot_dsds - tot_prot_dsds)))
1156 goto crc_queuing_error; 1322 goto crc_queuing_error;
1157 1323
@@ -1414,6 +1580,22 @@ qla24xx_dif_start_scsi(srb_t *sp)
1414 goto queuing_error; 1580 goto queuing_error;
1415 else 1581 else
1416 sp->flags |= SRB_DMA_VALID; 1582 sp->flags |= SRB_DMA_VALID;
1583
1584 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1585 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1586 struct qla2_sgx sgx;
1587 uint32_t partial;
1588
1589 memset(&sgx, 0, sizeof(struct qla2_sgx));
1590 sgx.tot_bytes = scsi_bufflen(cmd);
1591 sgx.cur_sg = scsi_sglist(cmd);
1592 sgx.sp = sp;
1593
1594 nseg = 0;
1595 while (qla24xx_get_one_block_sg(
1596 cmd->device->sector_size, &sgx, &partial))
1597 nseg++;
1598 }
1417 } else 1599 } else
1418 nseg = 0; 1600 nseg = 0;
1419 1601
@@ -1428,6 +1610,11 @@ qla24xx_dif_start_scsi(srb_t *sp)
1428 goto queuing_error; 1610 goto queuing_error;
1429 else 1611 else
1430 sp->flags |= SRB_CRC_PROT_DMA_VALID; 1612 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1613
1614 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1615 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1616 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1617 }
1431 } else { 1618 } else {
1432 nseg = 0; 1619 nseg = 0;
1433 } 1620 }
@@ -1454,6 +1641,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
1454 /* Build header part of command packet (excluding the OPCODE). */ 1641 /* Build header part of command packet (excluding the OPCODE). */
1455 req->current_outstanding_cmd = handle; 1642 req->current_outstanding_cmd = handle;
1456 req->outstanding_cmds[handle] = sp; 1643 req->outstanding_cmds[handle] = sp;
1644 sp->handle = handle;
1457 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1645 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1458 req->cnt -= req_cnt; 1646 req->cnt -= req_cnt;
1459 1647
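The new qla24xx_get_one_block_sg() walk carves the data scatterlist into protection-interval-sized pieces, carrying any shortfall across segment boundaries so a guard entry can be emitted once per full interval. A compilable userspace analogue of that carving (segment lengths and the 512-byte interval are invented; the real code operates on DMA-mapped scatterlist entries):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint32_t seg_len[] = { 1536, 300, 724, 512 };   /* invented DMA segments */
        const uint32_t nsegs = sizeof(seg_len) / sizeof(seg_len[0]);
        const uint32_t blk = 512;                /* protection interval */
        uint32_t i = 0, consumed = 0, partial = 0;

        while (i < nsegs) {
                uint32_t left = seg_len[i] - consumed;
                uint32_t piece;
                int block_done;

                if (partial + left >= blk) {     /* this piece finishes a block */
                        piece = blk - partial;
                        partial = 0;
                        block_done = 1;
                } else {                         /* carry the shortfall forward */
                        piece = left;
                        partial += piece;
                        block_done = 0;
                }
                printf("seg %u offset %u len %u%s\n", i, consumed, piece,
                       block_done ? "  <- interval complete" : "");
                consumed += piece;
                if (consumed == seg_len[i]) {    /* move on to the next segment */
                        i++;
                        consumed = 0;
                }
        }
        return 0;
}
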
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index b16b7725dee0..8a7591f035e6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -719,7 +719,6 @@ skip_rio:
719 vha->flags.rscn_queue_overflow = 1; 719 vha->flags.rscn_queue_overflow = 1;
720 } 720 }
721 721
722 atomic_set(&vha->loop_state, LOOP_UPDATE);
723 atomic_set(&vha->loop_down_timer, 0); 722 atomic_set(&vha->loop_down_timer, 0);
724 vha->flags.management_server_logged_in = 0; 723 vha->flags.management_server_logged_in = 0;
725 724
@@ -1435,25 +1434,27 @@ struct scsi_dif_tuple {
1435 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 1434 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1436 * to indicate to the kernel that the HBA detected error. 1435 * to indicate to the kernel that the HBA detected error.
1437 */ 1436 */
1438static inline void 1437static inline int
1439qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1438qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1440{ 1439{
1441 struct scsi_qla_host *vha = sp->fcport->vha; 1440 struct scsi_qla_host *vha = sp->fcport->vha;
1442 struct scsi_cmnd *cmd = sp->cmd; 1441 struct scsi_cmnd *cmd = sp->cmd;
1443 struct scsi_dif_tuple *ep = 1442 uint8_t *ap = &sts24->data[12];
1444 (struct scsi_dif_tuple *)&sts24->data[20]; 1443 uint8_t *ep = &sts24->data[20];
1445 struct scsi_dif_tuple *ap =
1446 (struct scsi_dif_tuple *)&sts24->data[12];
1447 uint32_t e_ref_tag, a_ref_tag; 1444 uint32_t e_ref_tag, a_ref_tag;
1448 uint16_t e_app_tag, a_app_tag; 1445 uint16_t e_app_tag, a_app_tag;
1449 uint16_t e_guard, a_guard; 1446 uint16_t e_guard, a_guard;
1450 1447
1451 e_ref_tag = be32_to_cpu(ep->ref_tag); 1448 /*
1452 a_ref_tag = be32_to_cpu(ap->ref_tag); 1449 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
1453 e_app_tag = be16_to_cpu(ep->app_tag); 1450 * would make guard field appear at offset 2
1454 a_app_tag = be16_to_cpu(ap->app_tag); 1451 */
1455 e_guard = be16_to_cpu(ep->guard); 1452 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
1456 a_guard = be16_to_cpu(ap->guard); 1453 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1454 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1455 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
1456 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1457 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
1457 1458
1458 ql_dbg(ql_dbg_io, vha, 0x3023, 1459 ql_dbg(ql_dbg_io, vha, 0x3023,
1459 "iocb(s) %p Returned STATUS.\n", sts24); 1460 "iocb(s) %p Returned STATUS.\n", sts24);
@@ -1465,6 +1466,63 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1465 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1466 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1466 a_app_tag, e_app_tag, a_guard, e_guard); 1467 a_app_tag, e_app_tag, a_guard, e_guard);
1467 1468
1469 /*
1470 * Ignore sector if:
1471 * For type 3: ref & app tag is all 'f's
1472 * For type 0,1,2: app tag is all 'f's
1473 */
1474 if ((a_app_tag == 0xffff) &&
1475 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1476 (a_ref_tag == 0xffffffff))) {
1477 uint32_t blocks_done, resid;
1478 sector_t lba_s = scsi_get_lba(cmd);
1479
1480 /* 2TB boundary case covered automatically with this */
1481 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
1482
1483 resid = scsi_bufflen(cmd) - (blocks_done *
1484 cmd->device->sector_size);
1485
1486 scsi_set_resid(cmd, resid);
1487 cmd->result = DID_OK << 16;
1488
1489 /* Update protection tag */
1490 if (scsi_prot_sg_count(cmd)) {
1491 uint32_t i, j = 0, k = 0, num_ent;
1492 struct scatterlist *sg;
1493 struct sd_dif_tuple *spt;
1494
1495 /* Patch the corresponding protection tags */
1496 scsi_for_each_prot_sg(cmd, sg,
1497 scsi_prot_sg_count(cmd), i) {
1498 num_ent = sg_dma_len(sg) / 8;
1499 if (k + num_ent < blocks_done) {
1500 k += num_ent;
1501 continue;
1502 }
1503 j = blocks_done - k - 1;
1504 k = blocks_done;
1505 break;
1506 }
1507
1508 if (k != blocks_done) {
1509 qla_printk(KERN_WARNING, sp->fcport->vha->hw,
1510 "unexpected tag values tag:lba=%x:%llx)\n",
1511 e_ref_tag, (unsigned long long)lba_s);
1512 return 1;
1513 }
1514
1515 spt = page_address(sg_page(sg)) + sg->offset;
1516 spt += j;
1517
1518 spt->app_tag = 0xffff;
1519 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1520 spt->ref_tag = 0xffffffff;
1521 }
1522
1523 return 0;
1524 }
1525
1468 /* check guard */ 1526 /* check guard */
1469 if (e_guard != a_guard) { 1527 if (e_guard != a_guard) {
1470 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1528 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
@@ -1472,28 +1530,30 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1472 set_driver_byte(cmd, DRIVER_SENSE); 1530 set_driver_byte(cmd, DRIVER_SENSE);
1473 set_host_byte(cmd, DID_ABORT); 1531 set_host_byte(cmd, DID_ABORT);
1474 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1532 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1475 return; 1533 return 1;
1476 } 1534 }
1477 1535
1478 /* check appl tag */ 1536 /* check ref tag */
1479 if (e_app_tag != a_app_tag) { 1537 if (e_ref_tag != a_ref_tag) {
1480 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1538 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1481 0x10, 0x2); 1539 0x10, 0x3);
1482 set_driver_byte(cmd, DRIVER_SENSE); 1540 set_driver_byte(cmd, DRIVER_SENSE);
1483 set_host_byte(cmd, DID_ABORT); 1541 set_host_byte(cmd, DID_ABORT);
1484 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1542 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1485 return; 1543 return 1;
1486 } 1544 }
1487 1545
1488 /* check ref tag */ 1546 /* check appl tag */
1489 if (e_ref_tag != a_ref_tag) { 1547 if (e_app_tag != a_app_tag) {
1490 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1548 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1491 0x10, 0x3); 1549 0x10, 0x2);
1492 set_driver_byte(cmd, DRIVER_SENSE); 1550 set_driver_byte(cmd, DRIVER_SENSE);
1493 set_host_byte(cmd, DID_ABORT); 1551 set_host_byte(cmd, DID_ABORT);
1494 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1552 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1495 return; 1553 return 1;
1496 } 1554 }
1555
1556 return 1;
1497} 1557}
1498 1558
1499/** 1559/**
@@ -1767,7 +1827,7 @@ check_scsi_status:
1767 break; 1827 break;
1768 1828
1769 case CS_DIF_ERROR: 1829 case CS_DIF_ERROR:
1770 qla2x00_handle_dif_error(sp, sts24); 1830 logit = qla2x00_handle_dif_error(sp, sts24);
1771 break; 1831 break;
1772 default: 1832 default:
1773 cp->result = DID_ERROR << 16; 1833 cp->result = DID_ERROR << 16;
@@ -2468,11 +2528,10 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2468 goto skip_msi; 2528 goto skip_msi;
2469 } 2529 }
2470 2530
2471 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || 2531 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
2472 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2473 ql_log(ql_log_warn, vha, 0x0035, 2532 ql_log(ql_log_warn, vha, 0x0035,
2474 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 2533 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
2475 ha->pdev->revision, ha->fw_attributes); 2534 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
2476 goto skip_msix; 2535 goto skip_msix;
2477 } 2536 }
2478 2537
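The "ignore the sector" branch added to qla2x00_handle_dif_error() turns the firmware's expected ref tag into a count of completed blocks and a residual byte count. The same arithmetic in isolation (all values invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t lba = 0x1000;          /* starting LBA of the command     */
        uint32_t e_ref_tag = 0x1007;    /* expected ref tag from firmware  */
        uint32_t sector = 512;
        uint32_t bufflen = 16 * 512;    /* 16-block transfer               */

        /* truncating the LBA to 32 bits handles the >2TB wrap the same way */
        uint32_t blocks_done = e_ref_tag - (uint32_t)lba + 1;   /* 8    */
        uint32_t resid = bufflen - blocks_done * sector;        /* 4096 */

        printf("blocks done %u, residual %u bytes\n", blocks_done, resid);
        return 0;
}
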
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c706ed370000..f488cc69fc79 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -472,7 +472,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
472 host->can_queue = base_vha->req->length + 128; 472 host->can_queue = base_vha->req->length + 128;
473 host->this_id = 255; 473 host->this_id = 255;
474 host->cmd_per_lun = 3; 474 host->cmd_per_lun = 3;
475 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) 475 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
476 host->max_cmd_len = 32; 476 host->max_cmd_len = 32;
477 else 477 else
478 host->max_cmd_len = MAX_CMDSZ; 478 host->max_cmd_len = MAX_CMDSZ;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 5cbf33a50b14..049807cda419 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2208,6 +2208,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
2208 struct qla_hw_data *ha; 2208 struct qla_hw_data *ha;
2209 struct rsp_que *rsp; 2209 struct rsp_que *rsp;
2210 struct device_reg_82xx __iomem *reg; 2210 struct device_reg_82xx __iomem *reg;
2211 unsigned long flags;
2211 2212
2212 rsp = (struct rsp_que *) dev_id; 2213 rsp = (struct rsp_que *) dev_id;
2213 if (!rsp) { 2214 if (!rsp) {
@@ -2218,11 +2219,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
2218 2219
2219 ha = rsp->hw; 2220 ha = rsp->hw;
2220 reg = &ha->iobase->isp82; 2221 reg = &ha->iobase->isp82;
2221 spin_lock_irq(&ha->hardware_lock); 2222 spin_lock_irqsave(&ha->hardware_lock, flags);
2222 vha = pci_get_drvdata(ha->pdev); 2223 vha = pci_get_drvdata(ha->pdev);
2223 qla24xx_process_response_queue(vha, rsp); 2224 qla24xx_process_response_queue(vha, rsp);
2224 WRT_REG_DWORD(&reg->host_int, 0); 2225 WRT_REG_DWORD(&reg->host_int, 0);
2225 spin_unlock_irq(&ha->hardware_lock); 2226 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2226 return IRQ_HANDLED; 2227 return IRQ_HANDLED;
2227} 2228}
2228 2229
@@ -2838,6 +2839,16 @@ sufficient_dsds:
2838 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 2839 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2839 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2840 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2840 2841
2842 /* build FCP_CMND IU */
2843 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2844 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2845 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2846
2847 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2848 ctx->fcp_cmnd->additional_cdb_len |= 1;
2849 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2850 ctx->fcp_cmnd->additional_cdb_len |= 2;
2851
2841 /* 2852 /*
2842 * Update tagged queuing modifier -- default is TSK_SIMPLE (0). 2853 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2843 */ 2854 */
@@ -2854,16 +2865,6 @@ sufficient_dsds:
2854 } 2865 }
2855 } 2866 }
2856 2867
2857 /* build FCP_CMND IU */
2858 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2859 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2860 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2861
2862 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2863 ctx->fcp_cmnd->additional_cdb_len |= 1;
2864 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2865 ctx->fcp_cmnd->additional_cdb_len |= 2;
2866
2867 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 2868 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2868 2869
2869 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + 2870 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
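The qla82xx_msix_rsp_q() change swaps spin_lock_irq()/spin_unlock_irq() for the irqsave variants, since an unconditional spin_unlock_irq() would re-enable local interrupts inside a hard-irq handler. A generic fragment of the corrected pattern (my_dev, my_msix_handler and the lock field are invented names; this is not the driver's code and is not a standalone program):

static irqreturn_t my_msix_handler(int irq, void *dev_id)
{
        struct my_dev *dev = dev_id;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);      /* remembers the irq state */
        /* ... process the response queue ... */
        spin_unlock_irqrestore(&dev->lock, flags); /* restores it exactly     */

        return IRQ_HANDLED;
}
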
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e02df276804e..4cace3f20c04 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -106,17 +106,21 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
106 "Maximum queue depth to report for target devices."); 106 "Maximum queue depth to report for target devices.");
107 107
108/* Do not change the value of this after module load */ 108/* Do not change the value of this after module load */
109int ql2xenabledif = 1; 109int ql2xenabledif = 0;
110module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR); 110module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
111MODULE_PARM_DESC(ql2xenabledif, 111MODULE_PARM_DESC(ql2xenabledif,
112 " Enable T10-CRC-DIF " 112 " Enable T10-CRC-DIF "
113 " Default is 0 - No DIF Support. 1 - Enable it"); 113 " Default is 0 - No DIF Support. 1 - Enable it"
114 ", 2 - Enable DIF for all types, except Type 0.");
114 115
115int ql2xenablehba_err_chk; 116int ql2xenablehba_err_chk = 2;
116module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); 117module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
117MODULE_PARM_DESC(ql2xenablehba_err_chk, 118MODULE_PARM_DESC(ql2xenablehba_err_chk,
118 " Enable T10-CRC-DIF Error isolation by HBA" 119 " Enable T10-CRC-DIF Error isolation by HBA:\n"
119 " Default is 0 - Error isolation disabled, 1 - Enable it"); 120 " Default is 1.\n"
121 " 0 -- Error isolation disabled\n"
122 " 1 -- Error isolation enabled only for DIX Type 0\n"
123 " 2 -- Error isolation enabled for all Types\n");
120 124
121int ql2xiidmaenable=1; 125int ql2xiidmaenable=1;
122module_param(ql2xiidmaenable, int, S_IRUGO); 126module_param(ql2xiidmaenable, int, S_IRUGO);
@@ -909,7 +913,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
909 "Abort command mbx success.\n"); 913 "Abort command mbx success.\n");
910 wait = 1; 914 wait = 1;
911 } 915 }
916
917 spin_lock_irqsave(&ha->hardware_lock, flags);
912 qla2x00_sp_compl(ha, sp); 918 qla2x00_sp_compl(ha, sp);
919 spin_unlock_irqrestore(&ha->hardware_lock, flags);
920
921 /* Did the command return during mailbox execution? */
922 if (ret == FAILED && !CMD_SP(cmd))
923 ret = SUCCESS;
913 924
914 /* Wait for the command to be returned. */ 925 /* Wait for the command to be returned. */
915 if (wait) { 926 if (wait) {
@@ -2251,7 +2262,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2251 host->this_id = 255; 2262 host->this_id = 255;
2252 host->cmd_per_lun = 3; 2263 host->cmd_per_lun = 3;
2253 host->unique_id = host->host_no; 2264 host->unique_id = host->host_no;
2254 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) 2265 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
2255 host->max_cmd_len = 32; 2266 host->max_cmd_len = 32;
2256 else 2267 else
2257 host->max_cmd_len = MAX_CMDSZ; 2268 host->max_cmd_len = MAX_CMDSZ;
@@ -2378,13 +2389,16 @@ skip_dpc:
2378 "Detected hba at address=%p.\n", 2389 "Detected hba at address=%p.\n",
2379 ha); 2390 ha);
2380 2391
2381 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 2392 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2382 if (ha->fw_attributes & BIT_4) { 2393 if (ha->fw_attributes & BIT_4) {
2394 int prot = 0;
2383 base_vha->flags.difdix_supported = 1; 2395 base_vha->flags.difdix_supported = 1;
2384 ql_dbg(ql_dbg_init, base_vha, 0x00f1, 2396 ql_dbg(ql_dbg_init, base_vha, 0x00f1,
2385 "Registering for DIF/DIX type 1 and 3 protection.\n"); 2397 "Registering for DIF/DIX type 1 and 3 protection.\n");
2398 if (ql2xenabledif == 1)
2399 prot = SHOST_DIX_TYPE0_PROTECTION;
2386 scsi_host_set_prot(host, 2400 scsi_host_set_prot(host,
2387 SHOST_DIF_TYPE1_PROTECTION 2401 prot | SHOST_DIF_TYPE1_PROTECTION
2388 | SHOST_DIF_TYPE2_PROTECTION 2402 | SHOST_DIF_TYPE2_PROTECTION
2389 | SHOST_DIF_TYPE3_PROTECTION 2403 | SHOST_DIF_TYPE3_PROTECTION
2390 | SHOST_DIX_TYPE1_PROTECTION 2404 | SHOST_DIX_TYPE1_PROTECTION
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 062c97bf62f5..13b6357c1fa2 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.07.03-k" 10#define QLA2XXX_VERSION "8.03.07.07-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
index 2c33ce6eac1e..0f5599e0abf6 100644
--- a/drivers/scsi/qla4xxx/Kconfig
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -1,6 +1,6 @@
1config SCSI_QLA_ISCSI 1config SCSI_QLA_ISCSI
2 tristate "QLogic ISP4XXX and ISP82XX host adapter family support" 2 tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
3 depends on PCI && SCSI 3 depends on PCI && SCSI && NET
4 select SCSI_ISCSI_ATTRS 4 select SCSI_ISCSI_ATTRS
5 ---help--- 5 ---help---
6 This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX) 6 This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index f33e2dd97934..33b2ed451e09 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -186,6 +186,9 @@ static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
186 !defined(CONFIG_CPU_SUBTYPE_SH7709) 186 !defined(CONFIG_CPU_SUBTYPE_SH7709)
187 [IRQ_TYPE_LEVEL_HIGH] = VALID(3), 187 [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
188#endif 188#endif
189#if defined(CONFIG_ARCH_SH7372)
190 [IRQ_TYPE_EDGE_BOTH] = VALID(4),
191#endif
189}; 192};
190 193
191static int intc_set_type(struct irq_data *data, unsigned int type) 194static int intc_set_type(struct irq_data *data, unsigned int type)
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 82dee9a6c0de..d3bff424286f 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -22,7 +22,7 @@
22 22
23#include <asm/io.h> 23#include <asm/io.h>
24#include <mach/board.h> 24#include <mach/board.h>
25#include <mach/gpio.h> 25#include <asm/gpio.h>
26#include <mach/cpu.h> 26#include <mach/cpu.h>
27 27
28/* SPI register offsets */ 28/* SPI register offsets */
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index d2407558773f..24cacff57786 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -825,6 +825,9 @@ static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
825{ 825{
826 struct device *dev = mspi->dev; 826 struct device *dev = mspi->dev;
827 827
828 if (!(mspi->flags & SPI_CPM_MODE))
829 return;
830
828 dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); 831 dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
829 dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); 832 dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
830 cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); 833 cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 8ac6542aedcd..fa594d604aca 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -786,9 +786,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
786 int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); 786 int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
787 if (cs_gpio < 0) 787 if (cs_gpio < 0)
788 cs_gpio = mxc_platform_info->chipselect[i]; 788 cs_gpio = mxc_platform_info->chipselect[i];
789
790 spi_imx->chipselect[i] = cs_gpio;
789 if (cs_gpio < 0) 791 if (cs_gpio < 0)
790 continue; 792 continue;
791 spi_imx->chipselect[i] = cs_gpio; 793
792 ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); 794 ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
793 if (ret) { 795 if (ret) {
794 while (i > 0) { 796 while (i > 0) {
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 1d23f3831866..6a80749391db 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -50,6 +50,8 @@
50#define PCH_RX_THOLD 7 50#define PCH_RX_THOLD 7
51#define PCH_RX_THOLD_MAX 15 51#define PCH_RX_THOLD_MAX 15
52 52
53#define PCH_TX_THOLD 2
54
53#define PCH_MAX_BAUDRATE 5000000 55#define PCH_MAX_BAUDRATE 5000000
54#define PCH_MAX_FIFO_DEPTH 16 56#define PCH_MAX_FIFO_DEPTH 16
55 57
@@ -58,6 +60,7 @@
58#define PCH_SLEEP_TIME 10 60#define PCH_SLEEP_TIME 10
59 61
60#define SSN_LOW 0x02U 62#define SSN_LOW 0x02U
63#define SSN_HIGH 0x03U
61#define SSN_NO_CONTROL 0x00U 64#define SSN_NO_CONTROL 0x00U
62#define PCH_MAX_CS 0xFF 65#define PCH_MAX_CS 0xFF
63#define PCI_DEVICE_ID_GE_SPI 0x8816 66#define PCI_DEVICE_ID_GE_SPI 0x8816
@@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
316 319
317 /* if transfer complete interrupt */ 320 /* if transfer complete interrupt */
318 if (reg_spsr_val & SPSR_FI_BIT) { 321 if (reg_spsr_val & SPSR_FI_BIT) {
319 if (tx_index < bpw_len) 322 if ((tx_index == bpw_len) && (rx_index == tx_index)) {
323 /* disable interrupts */
324 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
325
326 /* transfer is completed;
327 inform pch_spi_process_messages */
328 data->transfer_complete = true;
329 data->transfer_active = false;
330 wake_up(&data->wait);
331 } else {
320 dev_err(&data->master->dev, 332 dev_err(&data->master->dev,
321 "%s : Transfer is not completed", __func__); 333 "%s : Transfer is not completed", __func__);
322 /* disable interrupts */ 334 }
323 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
324
325 /* transfer is completed;inform pch_spi_process_messages */
326 data->transfer_complete = true;
327 data->transfer_active = false;
328 wake_up(&data->wait);
329 } 335 }
330} 336}
331 337
@@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id)
348 "%s returning due to suspend\n", __func__); 354 "%s returning due to suspend\n", __func__);
349 return IRQ_NONE; 355 return IRQ_NONE;
350 } 356 }
351 if (data->use_dma)
352 return IRQ_NONE;
353 357
354 io_remap_addr = data->io_remap_addr; 358 io_remap_addr = data->io_remap_addr;
355 spsr = io_remap_addr + PCH_SPSR; 359 spsr = io_remap_addr + PCH_SPSR;
356 360
357 reg_spsr_val = ioread32(spsr); 361 reg_spsr_val = ioread32(spsr);
358 362
359 if (reg_spsr_val & SPSR_ORF_BIT) 363 if (reg_spsr_val & SPSR_ORF_BIT) {
360 dev_err(&board_dat->pdev->dev, "%s Over run error", __func__); 364 dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
365 if (data->current_msg->complete != 0) {
366 data->transfer_complete = true;
367 data->current_msg->status = -EIO;
368 data->current_msg->complete(data->current_msg->context);
369 data->bcurrent_msg_processing = false;
370 data->current_msg = NULL;
371 data->cur_trans = NULL;
372 }
373 }
374
375 if (data->use_dma)
376 return IRQ_NONE;
361 377
362 /* Check if the interrupt is for SPI device */ 378 /* Check if the interrupt is for SPI device */
363 if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { 379 if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
@@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
756 772
757 wait_event_interruptible(data->wait, data->transfer_complete); 773 wait_event_interruptible(data->wait, data->transfer_complete);
758 774
759 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
760 dev_dbg(&data->master->dev,
761 "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
762
763 /* clear all interrupts */ 775 /* clear all interrupts */
764 pch_spi_writereg(data->master, PCH_SPSR, 776 pch_spi_writereg(data->master, PCH_SPSR,
765 pch_spi_readreg(data->master, PCH_SPSR)); 777 pch_spi_readreg(data->master, PCH_SPSR));
@@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
815 } 827 }
816} 828}
817 829
818static void pch_spi_start_transfer(struct pch_spi_data *data) 830static int pch_spi_start_transfer(struct pch_spi_data *data)
819{ 831{
820 struct pch_spi_dma_ctrl *dma; 832 struct pch_spi_dma_ctrl *dma;
821 unsigned long flags; 833 unsigned long flags;
834 int rtn;
822 835
823 dma = &data->dma; 836 dma = &data->dma;
824 837
@@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
833 initiating the transfer. */ 846 initiating the transfer. */
834 dev_dbg(&data->master->dev, 847 dev_dbg(&data->master->dev,
835 "%s:waiting for transfer to get over\n", __func__); 848 "%s:waiting for transfer to get over\n", __func__);
836 wait_event_interruptible(data->wait, data->transfer_complete); 849 rtn = wait_event_interruptible_timeout(data->wait,
850 data->transfer_complete,
851 msecs_to_jiffies(2 * HZ));
837 852
838 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, 853 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
839 DMA_FROM_DEVICE); 854 DMA_FROM_DEVICE);
855
856 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
857 DMA_FROM_DEVICE);
858 memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
859
840 async_tx_ack(dma->desc_rx); 860 async_tx_ack(dma->desc_rx);
841 async_tx_ack(dma->desc_tx); 861 async_tx_ack(dma->desc_tx);
842 kfree(dma->sg_tx_p); 862 kfree(dma->sg_tx_p);
843 kfree(dma->sg_rx_p); 863 kfree(dma->sg_rx_p);
844 864
845 spin_lock_irqsave(&data->lock, flags); 865 spin_lock_irqsave(&data->lock, flags);
846 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
847 dev_dbg(&data->master->dev,
848 "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
849 866
850 /* clear fifo threshold, disable interrupts, disable SPI transfer */ 867 /* clear fifo threshold, disable interrupts, disable SPI transfer */
851 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, 868 pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
@@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
858 pch_spi_clear_fifo(data->master); 875 pch_spi_clear_fifo(data->master);
859 876
860 spin_unlock_irqrestore(&data->lock, flags); 877 spin_unlock_irqrestore(&data->lock, flags);
878
879 return rtn;
861} 880}
862 881
863static void pch_dma_rx_complete(void *arg) 882static void pch_dma_rx_complete(void *arg)
@@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1023 /* set receive fifo threshold and transmit fifo threshold */ 1042 /* set receive fifo threshold and transmit fifo threshold */
1024 pch_spi_setclr_reg(data->master, PCH_SPCR, 1043 pch_spi_setclr_reg(data->master, PCH_SPCR,
1025 ((size - 1) << SPCR_RFIC_FIELD) | 1044 ((size - 1) << SPCR_RFIC_FIELD) |
1026 ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) << 1045 (PCH_TX_THOLD << SPCR_TFIC_FIELD),
1027 SPCR_TFIC_FIELD),
1028 MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); 1046 MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
1029 1047
1030 spin_unlock_irqrestore(&data->lock, flags); 1048 spin_unlock_irqrestore(&data->lock, flags);
@@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1035 /* offset, length setting */ 1053 /* offset, length setting */
1036 sg = dma->sg_rx_p; 1054 sg = dma->sg_rx_p;
1037 for (i = 0; i < num; i++, sg++) { 1055 for (i = 0; i < num; i++, sg++) {
1038 if (i == 0) { 1056 if (i == (num - 2)) {
1039 sg->offset = 0; 1057 sg->offset = size * i;
1058 sg->offset = sg->offset * (*bpw / 8);
1040 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, 1059 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
1041 sg->offset); 1060 sg->offset);
1042 sg_dma_len(sg) = rem; 1061 sg_dma_len(sg) = rem;
1062 } else if (i == (num - 1)) {
1063 sg->offset = size * (i - 1) + rem;
1064 sg->offset = sg->offset * (*bpw / 8);
1065 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
1066 sg->offset);
1067 sg_dma_len(sg) = size;
1043 } else { 1068 } else {
1044 sg->offset = rem + size * (i - 1); 1069 sg->offset = size * i;
1045 sg->offset = sg->offset * (*bpw / 8); 1070 sg->offset = sg->offset * (*bpw / 8);
1046 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, 1071 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
1047 sg->offset); 1072 sg->offset);
@@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1065 dma->desc_rx = desc_rx; 1090 dma->desc_rx = desc_rx;
1066 1091
1067 /* TX */ 1092 /* TX */
1093 if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
1094 num = data->bpw_len / PCH_DMA_TRANS_SIZE;
1095 size = PCH_DMA_TRANS_SIZE;
1096 rem = 16;
1097 } else {
1098 num = 1;
1099 size = data->bpw_len;
1100 rem = data->bpw_len;
1101 }
1102
1068 dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); 1103 dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
1069 sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ 1104 sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
1070 /* offset, length setting */ 1105 /* offset, length setting */
@@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
1162 if (data->use_dma) 1197 if (data->use_dma)
1163 pch_spi_request_dma(data, 1198 pch_spi_request_dma(data,
1164 data->current_msg->spi->bits_per_word); 1199 data->current_msg->spi->bits_per_word);
1200 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
1165 do { 1201 do {
1166 /* If we are already processing a message get the next 1202 /* If we are already processing a message get the next
1167 transfer structure from the message otherwise retrieve 1203 transfer structure from the message otherwise retrieve
@@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
1184 1220
1185 if (data->use_dma) { 1221 if (data->use_dma) {
1186 pch_spi_handle_dma(data, &bpw); 1222 pch_spi_handle_dma(data, &bpw);
1187 pch_spi_start_transfer(data); 1223 if (!pch_spi_start_transfer(data))
1224 goto out;
1188 pch_spi_copy_rx_data_for_dma(data, bpw); 1225 pch_spi_copy_rx_data_for_dma(data, bpw);
1189 } else { 1226 } else {
1190 pch_spi_set_tx(data, &bpw); 1227 pch_spi_set_tx(data, &bpw);
@@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
1222 1259
1223 } while (data->cur_trans != NULL); 1260 } while (data->cur_trans != NULL);
1224 1261
1262out:
1263 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
1225 if (data->use_dma) 1264 if (data->use_dma)
1226 pch_spi_release_dma(data); 1265 pch_spi_release_dma(data);
1227} 1266}
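The reworked rx scatter list in pch_spi_handle_dma() places the short remainder chunk second-to-last instead of first. A standalone check of that offset arithmetic (sizes invented; the driver additionally scales offsets to bytes for words wider than 8 bits, which is omitted here):

#include <stdio.h>
#include <assert.h>

int main(void)
{
        const unsigned int num = 4, size = 1024, rem = 100;   /* example values only */
        unsigned int expect_off = 0, i;

        for (i = 0; i < num; i++) {
                unsigned int off, len;

                if (i == num - 2) {              /* the short remainder chunk   */
                        off = size * i;
                        len = rem;
                } else if (i == num - 1) {       /* last chunk, shifted by rem  */
                        off = size * (i - 1) + rem;
                        len = size;
                } else {                         /* leading full-size chunks    */
                        off = size * i;
                        len = size;
                }
                printf("sg[%u]: offset %u, length %u\n", i, off, len);
                assert(off == expect_off);       /* chunks must tile the buffer */
                expect_off = off + len;
        }
        printf("total mapped: %u\n", expect_off);
        return 0;
}
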
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
index c01c0cb0af4e..b99a11a9dd69 100644
--- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
+++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
@@ -812,7 +812,7 @@ int AthCreateCommandList(struct ps_cmd_packet **HciPacketList, u32 *numPackets)
812 for(count = 0; count < Patch_Count; count++) { 812 for(count = 0; count < Patch_Count; count++) {
813 813
814 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Freeing Patch Buffer %d \r\n",count)); 814 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Freeing Patch Buffer %d \r\n",count));
815 kfree(RamPatch[Patch_Count].Data); 815 kfree(RamPatch[count].Data);
816 } 816 }
817 817
818 for(count = 0; count < Tag_Count; count++) { 818 for(count = 0; count < Tag_Count; count++) {
diff --git a/drivers/staging/brcm80211/brcmsmac/otp.c b/drivers/staging/brcm80211/brcmsmac/otp.c
index 34253cf37812..4a70180eba5d 100644
--- a/drivers/staging/brcm80211/brcmsmac/otp.c
+++ b/drivers/staging/brcm80211/brcmsmac/otp.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/string.h>
19 20
20#include <brcm_hw_ids.h> 21#include <brcm_hw_ids.h>
21#include <chipcommon.h> 22#include <chipcommon.h>
diff --git a/drivers/staging/brcm80211/brcmsmac/types.h b/drivers/staging/brcm80211/brcmsmac/types.h
index bbf21897ae0e..823b5e4672e2 100644
--- a/drivers/staging/brcm80211/brcmsmac/types.h
+++ b/drivers/staging/brcm80211/brcmsmac/types.h
@@ -18,6 +18,7 @@
18#define _BRCM_TYPES_H_ 18#define _BRCM_TYPES_H_
19 19
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/io.h>
21 22
22/* Bus types */ 23/* Bus types */
23#define SI_BUS 0 /* SOC Interconnect */ 24#define SI_BUS 0 /* SOC Interconnect */
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 6859af0778cf..7611def97d06 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -241,8 +241,10 @@ static int labpc_eeprom_write_insn(struct comedi_device *dev,
241 struct comedi_insn *insn, 241 struct comedi_insn *insn,
242 unsigned int *data); 242 unsigned int *data);
243static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd); 243static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd);
244#ifdef CONFIG_COMEDI_PCI 244#ifdef CONFIG_ISA_DMA_API
245static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd); 245static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd);
246#endif
247#ifdef CONFIG_COMEDI_PCI
246static int labpc_find_device(struct comedi_device *dev, int bus, int slot); 248static int labpc_find_device(struct comedi_device *dev, int bus, int slot);
247#endif 249#endif
248static int labpc_dio_mem_callback(int dir, int port, int data, 250static int labpc_dio_mem_callback(int dir, int port, int data,
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.c b/drivers/staging/dt3155v4l/dt3155v4l.c
index fe02d22274b4..05aa41cf875b 100644
--- a/drivers/staging/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/dt3155v4l/dt3155v4l.c
@@ -22,6 +22,7 @@
22#include <linux/stringify.h> 22#include <linux/stringify.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/kthread.h> 24#include <linux/kthread.h>
25#include <linux/slab.h>
25#include <media/v4l2-dev.h> 26#include <media/v4l2-dev.h>
26#include <media/v4l2-ioctl.h> 27#include <media/v4l2-ioctl.h>
27#include <media/videobuf2-dma-contig.h> 28#include <media/videobuf2-dma-contig.h>
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index 627a98b4ec30..9e728b3415e3 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -22,6 +22,7 @@
22#include <linux/string.h> 22#include <linux/string.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <asm/io.h>
25#include <asm/uaccess.h> 26#include <asm/uaccess.h>
26#include "ft1000.h" 27#include "ft1000.h"
27 28
diff --git a/drivers/staging/gma500/gem_glue.c b/drivers/staging/gma500/gem_glue.c
index 779ac1a12d24..daac12120653 100644
--- a/drivers/staging/gma500/gem_glue.c
+++ b/drivers/staging/gma500/gem_glue.c
@@ -20,26 +20,6 @@
20#include <drm/drmP.h> 20#include <drm/drmP.h>
21#include <drm/drm.h> 21#include <drm/drm.h>
22 22
23/**
24 * Initialize an already allocated GEM object of the specified size with
25 * no GEM provided backing store. Instead the caller is responsible for
26 * backing the object and handling it.
27 */
28int drm_gem_private_object_init(struct drm_device *dev,
29 struct drm_gem_object *obj, size_t size)
30{
31 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
32
33 obj->dev = dev;
34 obj->filp = NULL;
35
36 kref_init(&obj->refcount);
37 atomic_set(&obj->handle_count, 0);
38 obj->size = size;
39
40 return 0;
41}
42
43void drm_gem_object_release_wrap(struct drm_gem_object *obj) 23void drm_gem_object_release_wrap(struct drm_gem_object *obj)
44{ 24{
45 /* Remove the list map if one is present */ 25 /* Remove the list map if one is present */
@@ -51,8 +31,7 @@ void drm_gem_object_release_wrap(struct drm_gem_object *obj)
51 kfree(list->map); 31 kfree(list->map);
52 list->map = NULL; 32 list->map = NULL;
53 } 33 }
54 if (obj->filp) 34 drm_gem_object_release(obj);
55 drm_gem_object_release(obj);
56} 35}
57 36
58/** 37/**
diff --git a/drivers/staging/gma500/gem_glue.h b/drivers/staging/gma500/gem_glue.h
index a0f2bc4e4ae7..ce5ce30f74db 100644
--- a/drivers/staging/gma500/gem_glue.h
+++ b/drivers/staging/gma500/gem_glue.h
@@ -1,4 +1,2 @@
1extern void drm_gem_object_release_wrap(struct drm_gem_object *obj); 1extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
2extern int drm_gem_private_object_init(struct drm_device *dev,
3 struct drm_gem_object *obj, size_t size);
4extern int gem_create_mmap_offset(struct drm_gem_object *obj); 2extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.c b/drivers/staging/gma500/mdfld_dsi_dbi.c
index 02e17c9c8637..fd211f3467c4 100644
--- a/drivers/staging/gma500/mdfld_dsi_dbi.c
+++ b/drivers/staging/gma500/mdfld_dsi_dbi.c
@@ -711,10 +711,11 @@ struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
711 /* Create drm encoder object */ 711 /* Create drm encoder object */
712 connector = &dsi_connector->base.base; 712 connector = &dsi_connector->base.base;
713 encoder = &dbi_output->base.base; 713 encoder = &dbi_output->base.base;
714 /* Review this if we ever get MIPI-HDMI bridges or similar */
714 drm_encoder_init(dev, 715 drm_encoder_init(dev,
715 encoder, 716 encoder,
716 p_funcs->encoder_funcs, 717 p_funcs->encoder_funcs,
717 DRM_MODE_ENCODER_MIPI); 718 DRM_MODE_ENCODER_LVDS);
718 drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs); 719 drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs);
719 720
720 /* Attach to given connector */ 721 /* Attach to given connector */
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.h b/drivers/staging/gma500/mdfld_dsi_dbi.h
index dc6242c51d0b..f0fa986fd934 100644
--- a/drivers/staging/gma500/mdfld_dsi_dbi.h
+++ b/drivers/staging/gma500/mdfld_dsi_dbi.h
@@ -42,9 +42,6 @@
42#include "mdfld_dsi_output.h" 42#include "mdfld_dsi_output.h"
43#include "mdfld_output.h" 43#include "mdfld_output.h"
44 44
45#define DRM_MODE_ENCODER_MIPI 5
46
47
48/* 45/*
49 * DBI encoder which inherits from mdfld_dsi_encoder 46 * DBI encoder which inherits from mdfld_dsi_encoder
50 */ 47 */
diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.c b/drivers/staging/gma500/mdfld_dsi_dpi.c
index 6e03a91e947e..e685f1217baa 100644
--- a/drivers/staging/gma500/mdfld_dsi_dpi.c
+++ b/drivers/staging/gma500/mdfld_dsi_dpi.c
@@ -777,10 +777,15 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
777 /* Create drm encoder object */ 777 /* Create drm encoder object */
778 connector = &dsi_connector->base.base; 778 connector = &dsi_connector->base.base;
779 encoder = &dpi_output->base.base; 779 encoder = &dpi_output->base.base;
780 /*
781 * On existing hardware this will be a panel of some form,
782 * if future devices also have HDMI bridges this will need
783 * revisiting
784 */
780 drm_encoder_init(dev, 785 drm_encoder_init(dev,
781 encoder, 786 encoder,
782 p_funcs->encoder_funcs, 787 p_funcs->encoder_funcs,
783 DRM_MODE_ENCODER_MIPI); 788 DRM_MODE_ENCODER_LVDS);
784 drm_encoder_helper_add(encoder, 789 drm_encoder_helper_add(encoder,
785 p_funcs->encoder_helper_funcs); 790 p_funcs->encoder_helper_funcs);
786 791
diff --git a/drivers/staging/gma500/mdfld_dsi_output.c b/drivers/staging/gma500/mdfld_dsi_output.c
index 7536095c30a0..9050c0f78b15 100644
--- a/drivers/staging/gma500/mdfld_dsi_output.c
+++ b/drivers/staging/gma500/mdfld_dsi_output.c
@@ -955,7 +955,9 @@ void mdfld_dsi_output_init(struct drm_device *dev,
955 psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2; 955 psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2;
956 956
957 connector = &psb_output->base; 957 connector = &psb_output->base;
958 drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, DRM_MODE_CONNECTOR_MIPI); 958 /* Revisit type if MIPI/HDMI bridges ever appear on Medfield */
959 drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
960 DRM_MODE_CONNECTOR_LVDS);
959 drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs); 961 drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
960 962
961 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 963 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/staging/gma500/medfield.h b/drivers/staging/gma500/medfield.h
index 38165e8367e5..09e9687431f1 100644
--- a/drivers/staging/gma500/medfield.h
+++ b/drivers/staging/gma500/medfield.h
@@ -21,8 +21,6 @@
21 * DEALINGS IN THE SOFTWARE. 21 * DEALINGS IN THE SOFTWARE.
22 */ 22 */
23 23
24#define DRM_MODE_ENCODER_MIPI 5
25
26/* Medfield DSI controller registers */ 24/* Medfield DSI controller registers */
27 25
28#define MIPIA_DEVICE_READY_REG 0xb000 26#define MIPIA_DEVICE_READY_REG 0xb000
diff --git a/drivers/staging/gma500/psb_drv.h b/drivers/staging/gma500/psb_drv.h
index 72f487a2a1b7..fd4732dd783a 100644
--- a/drivers/staging/gma500/psb_drv.h
+++ b/drivers/staging/gma500/psb_drv.h
@@ -35,7 +35,6 @@
35 35
36/* Append new drm mode definition here, align with libdrm definition */ 36/* Append new drm mode definition here, align with libdrm definition */
37#define DRM_MODE_SCALE_NO_SCALE 2 37#define DRM_MODE_SCALE_NO_SCALE 2
38#define DRM_MODE_CONNECTOR_MIPI 15
39 38
40enum { 39enum {
41 CHIP_PSB_8108 = 0, /* Poulsbo */ 40 CHIP_PSB_8108 = 0, /* Poulsbo */
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index 3612574ca520..d286b2223181 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -325,7 +325,7 @@ static int blkvsc_do_operation(struct block_device_context *blkdev,
325 325
326 page_buf = alloc_page(GFP_KERNEL); 326 page_buf = alloc_page(GFP_KERNEL);
327 if (!page_buf) { 327 if (!page_buf) {
328 kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); 328 kmem_cache_free(blkdev->request_pool, blkvsc_req);
329 return -ENOMEM; 329 return -ENOMEM;
330 } 330 }
331 331
@@ -422,7 +422,7 @@ cleanup:
422 422
423 __free_page(page_buf); 423 __free_page(page_buf);
424 424
425 kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); 425 kmem_cache_free(blkdev->request_pool, blkvsc_req);
426 426
427 return ret; 427 return ret;
428} 428}
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index bf1988884e93..cf5d15da76ad 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -311,13 +311,17 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
311 mutex_lock(&indio_dev->mlock); 311 mutex_lock(&indio_dev->mlock);
312 addr = adis16203_addresses[chan->address][0]; 312 addr = adis16203_addresses[chan->address][0];
313 ret = adis16203_spi_read_reg_16(indio_dev, addr, &val16); 313 ret = adis16203_spi_read_reg_16(indio_dev, addr, &val16);
314 if (ret) 314 if (ret) {
315 mutex_unlock(&indio_dev->mlock);
315 return ret; 316 return ret;
317 }
316 318
317 if (val16 & ADIS16203_ERROR_ACTIVE) { 319 if (val16 & ADIS16203_ERROR_ACTIVE) {
318 ret = adis16203_check_status(indio_dev); 320 ret = adis16203_check_status(indio_dev);
319 if (ret) 321 if (ret) {
322 mutex_unlock(&indio_dev->mlock);
320 return ret; 323 return ret;
324 }
321 } 325 }
322 val16 = val16 & ((1 << chan->scan_type.realbits) - 1); 326 val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
323 if (chan->scan_type.sign == 's') 327 if (chan->scan_type.sign == 's')
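
Note: the adis16203 hunk above (and the near-identical adis16204/16209/16240/16260 hunks that follow) simply adds a mutex_unlock() before each early return so the read path never exits with indio_dev->mlock held. A sketch of the same fix written with the usual goto-unwind idiom; the structure and helper names here are hypothetical, not the driver's own:

#include <linux/mutex.h>
#include <linux/types.h>

struct example_state {
        struct mutex lock;
        u16 last_val;
};

/* Stub standing in for the SPI register read; always succeeds here. */
static int example_hw_read(struct example_state *st, u16 *val)
{
        *val = st->last_val;
        return 0;
}

static int example_read_locked(struct example_state *st, u16 *val)
{
        int ret;

        mutex_lock(&st->lock);
        ret = example_hw_read(st, val);
        if (ret)
                goto out_unlock;        /* never return with the lock held */
        st->last_val = *val;
out_unlock:
        mutex_unlock(&st->lock);
        return ret;
}
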
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
index cfd09b3b9937..3e2b62654b7d 100644
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -341,13 +341,17 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
341 mutex_lock(&indio_dev->mlock); 341 mutex_lock(&indio_dev->mlock);
342 addr = adis16204_addresses[chan->address][0]; 342 addr = adis16204_addresses[chan->address][0];
343 ret = adis16204_spi_read_reg_16(indio_dev, addr, &val16); 343 ret = adis16204_spi_read_reg_16(indio_dev, addr, &val16);
344 if (ret) 344 if (ret) {
345 mutex_unlock(&indio_dev->mlock);
345 return ret; 346 return ret;
347 }
346 348
347 if (val16 & ADIS16204_ERROR_ACTIVE) { 349 if (val16 & ADIS16204_ERROR_ACTIVE) {
348 ret = adis16204_check_status(indio_dev); 350 ret = adis16204_check_status(indio_dev);
349 if (ret) 351 if (ret) {
352 mutex_unlock(&indio_dev->mlock);
350 return ret; 353 return ret;
354 }
351 } 355 }
352 val16 = val16 & ((1 << chan->scan_type.realbits) - 1); 356 val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
353 if (chan->scan_type.sign == 's') 357 if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index 55f3a7bcaf0a..bec1fa8de9b9 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -337,13 +337,17 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
337 mutex_lock(&indio_dev->mlock); 337 mutex_lock(&indio_dev->mlock);
338 addr = adis16209_addresses[chan->address][0]; 338 addr = adis16209_addresses[chan->address][0];
339 ret = adis16209_spi_read_reg_16(indio_dev, addr, &val16); 339 ret = adis16209_spi_read_reg_16(indio_dev, addr, &val16);
340 if (ret) 340 if (ret) {
341 mutex_unlock(&indio_dev->mlock);
341 return ret; 342 return ret;
343 }
342 344
343 if (val16 & ADIS16209_ERROR_ACTIVE) { 345 if (val16 & ADIS16209_ERROR_ACTIVE) {
344 ret = adis16209_check_status(indio_dev); 346 ret = adis16209_check_status(indio_dev);
345 if (ret) 347 if (ret) {
348 mutex_unlock(&indio_dev->mlock);
346 return ret; 349 return ret;
350 }
347 } 351 }
348 val16 = val16 & ((1 << chan->scan_type.realbits) - 1); 352 val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
349 if (chan->scan_type.sign == 's') 353 if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index 4a4eafc58630..aee8b69173c4 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -370,13 +370,17 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
370 mutex_lock(&indio_dev->mlock); 370 mutex_lock(&indio_dev->mlock);
371 addr = adis16240_addresses[chan->address][0]; 371 addr = adis16240_addresses[chan->address][0];
372 ret = adis16240_spi_read_reg_16(indio_dev, addr, &val16); 372 ret = adis16240_spi_read_reg_16(indio_dev, addr, &val16);
373 if (ret) 373 if (ret) {
374 mutex_unlock(&indio_dev->mlock);
374 return ret; 375 return ret;
376 }
375 377
376 if (val16 & ADIS16240_ERROR_ACTIVE) { 378 if (val16 & ADIS16240_ERROR_ACTIVE) {
377 ret = adis16240_check_status(indio_dev); 379 ret = adis16240_check_status(indio_dev);
378 if (ret) 380 if (ret) {
381 mutex_unlock(&indio_dev->mlock);
379 return ret; 382 return ret;
383 }
380 } 384 }
381 val16 = val16 & ((1 << chan->scan_type.realbits) - 1); 385 val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
382 if (chan->scan_type.sign == 's') 386 if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
index 05797f404bea..f2d43cfcc493 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -446,13 +446,17 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
446 mutex_lock(&indio_dev->mlock); 446 mutex_lock(&indio_dev->mlock);
447 addr = adis16260_addresses[chan->address][0]; 447 addr = adis16260_addresses[chan->address][0];
448 ret = adis16260_spi_read_reg_16(indio_dev, addr, &val16); 448 ret = adis16260_spi_read_reg_16(indio_dev, addr, &val16);
449 if (ret) 449 if (ret) {
450 mutex_unlock(&indio_dev->mlock);
450 return ret; 451 return ret;
452 }
451 453
452 if (val16 & ADIS16260_ERROR_ACTIVE) { 454 if (val16 & ADIS16260_ERROR_ACTIVE) {
453 ret = adis16260_check_status(indio_dev); 455 ret = adis16260_check_status(indio_dev);
454 if (ret) 456 if (ret) {
457 mutex_unlock(&indio_dev->mlock);
455 return ret; 458 return ret;
459 }
456 } 460 }
457 val16 = val16 & ((1 << chan->scan_type.realbits) - 1); 461 val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
458 if (chan->scan_type.sign == 's') 462 if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO
index 77b47f763f22..649d6b70deaa 100644
--- a/drivers/staging/nvec/TODO
+++ b/drivers/staging/nvec/TODO
@@ -4,5 +4,7 @@ ToDo list (incomplete, unordered)
4 - add compile as module support 4 - add compile as module support
5 - move nvec devices to mfd cells? 5 - move nvec devices to mfd cells?
6 - adjust to kernel style 6 - adjust to kernel style
7 7 - fix clk usage
8 8 should not be using clk_get_sys(), but clk_get(&pdev->dev, conn)
9 where conn is either NULL if the device only has one clock, or
10 the device specific name if it has multiple clocks.
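
For reference, a minimal sketch of the clk_get(&pdev->dev, conn) usage the TODO entry above asks for, assuming a platform driver probe; the function name and the single-clock case are illustrative, not part of the patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct clk *clk;

        /* conn == NULL: the device has only one clock */
        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        clk_enable(clk);
        /* ... use the clock; clk_disable()/clk_put() on remove ... */
        return 0;
}
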
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 9c0d2936e486..c3d73f8431ae 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -26,6 +26,7 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/interrupt.h>
29#include <linux/phy.h> 30#include <linux/phy.h>
30#include <linux/ratelimit.h> 31#include <linux/ratelimit.h>
31#include <net/dst.h> 32#include <net/dst.h>
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 970825421884..d0e2d514968a 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -26,6 +26,7 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/interrupt.h>
29#include <net/dst.h> 30#include <net/dst.h>
30 31
31#include <asm/octeon/octeon.h> 32#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index 6766f468639f..4bb5fffca5b9 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -399,10 +399,7 @@ download_firmware_fail:
399 399
400} 400}
401 401
402 402MODULE_FIRMWARE("RTL8192U/boot.img");
403 403MODULE_FIRMWARE("RTL8192U/main.img");
404 404MODULE_FIRMWARE("RTL8192U/data.img");
405
406
407
408 405
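
The MODULE_FIRMWARE() lines added above only record the image names in the module's modinfo so packaging tools can bundle the blobs; loading still happens at runtime through request_firmware(). A sketch of that split, reusing the boot image name from the hunk (the surrounding function is hypothetical):

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

MODULE_FIRMWARE("RTL8192U/boot.img");   /* advertised in modinfo only */

static int example_load_boot_img(struct device *dev)
{
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, "RTL8192U/boot.img", dev);
        if (ret)
                return ret;
        /* ... copy fw->data (fw->size bytes) to the hardware ... */
        release_firmware(fw);
        return 0;
}
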
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 5ff59f27d101..16c73fbff51f 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -66,12 +66,6 @@ static int msi_en;
66module_param(msi_en, int, S_IRUGO | S_IWUSR); 66module_param(msi_en, int, S_IRUGO | S_IWUSR);
67MODULE_PARM_DESC(msi_en, "enable msi"); 67MODULE_PARM_DESC(msi_en, "enable msi");
68 68
69/* These are used to make sure the module doesn't unload before all the
70 * threads have exited.
71 */
72static atomic_t total_threads = ATOMIC_INIT(0);
73static DECLARE_COMPLETION(threads_gone);
74
75static irqreturn_t rtsx_interrupt(int irq, void *dev_id); 69static irqreturn_t rtsx_interrupt(int irq, void *dev_id);
76 70
77/*********************************************************************** 71/***********************************************************************
@@ -192,7 +186,7 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
192 /* enqueue the command and wake up the control thread */ 186 /* enqueue the command and wake up the control thread */
193 srb->scsi_done = done; 187 srb->scsi_done = done;
194 chip->srb = srb; 188 chip->srb = srb;
195 up(&(dev->sema)); 189 complete(&dev->cmnd_ready);
196 190
197 return 0; 191 return 0;
198} 192}
@@ -475,7 +469,7 @@ static int rtsx_control_thread(void *__dev)
475 current->flags |= PF_NOFREEZE; 469 current->flags |= PF_NOFREEZE;
476 470
477 for (;;) { 471 for (;;) {
478 if (down_interruptible(&dev->sema)) 472 if (wait_for_completion_interruptible(&dev->cmnd_ready))
479 break; 473 break;
480 474
481 /* lock the device pointers */ 475 /* lock the device pointers */
@@ -557,8 +551,6 @@ SkipForAbort:
557 mutex_unlock(&dev->dev_mutex); 551 mutex_unlock(&dev->dev_mutex);
558 } /* for (;;) */ 552 } /* for (;;) */
559 553
560 scsi_host_put(host);
561
562 /* notify the exit routine that we're actually exiting now 554 /* notify the exit routine that we're actually exiting now
563 * 555 *
564 * complete()/wait_for_completion() is similar to up()/down(), 556 * complete()/wait_for_completion() is similar to up()/down(),
@@ -573,7 +565,7 @@ SkipForAbort:
573 * This is important in preemption kernels, which transfer the flow 565 * This is important in preemption kernels, which transfer the flow
574 * of execution immediately upon a complete(). 566 * of execution immediately upon a complete().
575 */ 567 */
576 complete_and_exit(&threads_gone, 0); 568 complete_and_exit(&dev->control_exit, 0);
577} 569}
578 570
579 571
@@ -581,7 +573,6 @@ static int rtsx_polling_thread(void *__dev)
581{ 573{
582 struct rtsx_dev *dev = (struct rtsx_dev *)__dev; 574 struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
583 struct rtsx_chip *chip = dev->chip; 575 struct rtsx_chip *chip = dev->chip;
584 struct Scsi_Host *host = rtsx_to_host(dev);
585 struct sd_info *sd_card = &(chip->sd_card); 576 struct sd_info *sd_card = &(chip->sd_card);
586 struct xd_info *xd_card = &(chip->xd_card); 577 struct xd_info *xd_card = &(chip->xd_card);
587 struct ms_info *ms_card = &(chip->ms_card); 578 struct ms_info *ms_card = &(chip->ms_card);
@@ -621,8 +612,7 @@ static int rtsx_polling_thread(void *__dev)
621 mutex_unlock(&dev->dev_mutex); 612 mutex_unlock(&dev->dev_mutex);
622 } 613 }
623 614
624 scsi_host_put(host); 615 complete_and_exit(&dev->polling_exit, 0);
625 complete_and_exit(&threads_gone, 0);
626} 616}
627 617
628/* 618/*
@@ -699,29 +689,38 @@ static void rtsx_release_resources(struct rtsx_dev *dev)
699{ 689{
700 printk(KERN_INFO "-- %s\n", __func__); 690 printk(KERN_INFO "-- %s\n", __func__);
701 691
692 /* Tell the control thread to exit. The SCSI host must
693 * already have been removed so it won't try to queue
694 * any more commands.
695 */
696 printk(KERN_INFO "-- sending exit command to thread\n");
697 complete(&dev->cmnd_ready);
698 if (dev->ctl_thread)
699 wait_for_completion(&dev->control_exit);
700 if (dev->polling_thread)
701 wait_for_completion(&dev->polling_exit);
702
703 wait_timeout(200);
704
702 if (dev->rtsx_resv_buf) { 705 if (dev->rtsx_resv_buf) {
703 dma_free_coherent(&(dev->pci->dev), HOST_CMDS_BUF_LEN, 706 dma_free_coherent(&(dev->pci->dev), RTSX_RESV_BUF_LEN,
704 dev->rtsx_resv_buf, dev->rtsx_resv_buf_addr); 707 dev->rtsx_resv_buf, dev->rtsx_resv_buf_addr);
705 dev->chip->host_cmds_ptr = NULL; 708 dev->chip->host_cmds_ptr = NULL;
706 dev->chip->host_sg_tbl_ptr = NULL; 709 dev->chip->host_sg_tbl_ptr = NULL;
707 } 710 }
708 711
709 pci_disable_device(dev->pci); 712 if (dev->irq > 0)
710 pci_release_regions(dev->pci);
711
712 if (dev->irq > 0) {
713 free_irq(dev->irq, (void *)dev); 713 free_irq(dev->irq, (void *)dev);
714 } 714 if (dev->chip->msi_en)
715 if (dev->chip->msi_en) {
716 pci_disable_msi(dev->pci); 715 pci_disable_msi(dev->pci);
717 } 716 if (dev->remap_addr)
717 iounmap(dev->remap_addr);
718 718
719 /* Tell the control thread to exit. The SCSI host must 719 pci_disable_device(dev->pci);
720 * already have been removed so it won't try to queue 720 pci_release_regions(dev->pci);
721 * any more commands. 721
722 */ 722 rtsx_release_chip(dev->chip);
723 printk(KERN_INFO "-- sending exit command to thread\n"); 723 kfree(dev->chip);
724 up(&dev->sema);
725} 724}
726 725
727/* First stage of disconnect processing: stop all commands and remove 726/* First stage of disconnect processing: stop all commands and remove
@@ -739,6 +738,7 @@ static void quiesce_and_remove_host(struct rtsx_dev *dev)
739 scsi_unlock(host); 738 scsi_unlock(host);
740 mutex_unlock(&dev->dev_mutex); 739 mutex_unlock(&dev->dev_mutex);
741 wake_up(&dev->delay_wait); 740 wake_up(&dev->delay_wait);
741 wait_for_completion(&dev->scanning_done);
742 742
743 /* Wait some time to let other threads exist */ 743 /* Wait some time to let other threads exist */
744 wait_timeout(100); 744 wait_timeout(100);
@@ -793,8 +793,7 @@ static int rtsx_scan_thread(void *__dev)
793 /* Should we unbind if no devices were detected? */ 793 /* Should we unbind if no devices were detected? */
794 } 794 }
795 795
796 scsi_host_put(rtsx_to_host(dev)); 796 complete_and_exit(&dev->scanning_done, 0);
797 complete_and_exit(&threads_gone, 0);
798} 797}
799 798
800static void rtsx_init_options(struct rtsx_chip *chip) 799static void rtsx_init_options(struct rtsx_chip *chip)
@@ -941,8 +940,11 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
941 940
942 spin_lock_init(&dev->reg_lock); 941 spin_lock_init(&dev->reg_lock);
943 mutex_init(&(dev->dev_mutex)); 942 mutex_init(&(dev->dev_mutex));
944 sema_init(&(dev->sema), 0); 943 init_completion(&dev->cmnd_ready);
944 init_completion(&dev->control_exit);
945 init_completion(&dev->polling_exit);
945 init_completion(&(dev->notify)); 946 init_completion(&(dev->notify));
947 init_completion(&dev->scanning_done);
946 init_waitqueue_head(&dev->delay_wait); 948 init_waitqueue_head(&dev->delay_wait);
947 949
948 dev->pci = pci; 950 dev->pci = pci;
@@ -992,28 +994,22 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
992 pci_set_master(pci); 994 pci_set_master(pci);
993 synchronize_irq(dev->irq); 995 synchronize_irq(dev->irq);
994 996
995 err = scsi_add_host(host, &pci->dev);
996 if (err) {
997 printk(KERN_ERR "Unable to add the scsi host\n");
998 goto errout;
999 }
1000
1001 rtsx_init_chip(dev->chip); 997 rtsx_init_chip(dev->chip);
1002 998
1003 /* Start up our control thread */ 999 /* Start up our control thread */
1004 th = kthread_create(rtsx_control_thread, dev, CR_DRIVER_NAME); 1000 th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
1005 if (IS_ERR(th)) { 1001 if (IS_ERR(th)) {
1006 printk(KERN_ERR "Unable to start control thread\n"); 1002 printk(KERN_ERR "Unable to start control thread\n");
1007 err = PTR_ERR(th); 1003 err = PTR_ERR(th);
1008 goto errout; 1004 goto errout;
1009 } 1005 }
1006 dev->ctl_thread = th;
1010 1007
1011 /* Take a reference to the host for the control thread and 1008 err = scsi_add_host(host, &pci->dev);
1012 * count it among all the threads we have launched. Then 1009 if (err) {
1013 * start it up. */ 1010 printk(KERN_ERR "Unable to add the scsi host\n");
1014 scsi_host_get(rtsx_to_host(dev)); 1011 goto errout;
1015 atomic_inc(&total_threads); 1012 }
1016 wake_up_process(th);
1017 1013
1018 /* Start up the thread for delayed SCSI-device scanning */ 1014 /* Start up the thread for delayed SCSI-device scanning */
1019 th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan"); 1015 th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
@@ -1024,28 +1020,17 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
1024 goto errout; 1020 goto errout;
1025 } 1021 }
1026 1022
1027 /* Take a reference to the host for the scanning thread and
1028 * count it among all the threads we have launched. Then
1029 * start it up. */
1030 scsi_host_get(rtsx_to_host(dev));
1031 atomic_inc(&total_threads);
1032 wake_up_process(th); 1023 wake_up_process(th);
1033 1024
1034 /* Start up the thread for polling thread */ 1025 /* Start up the thread for polling thread */
1035 th = kthread_create(rtsx_polling_thread, dev, "rtsx-polling"); 1026 th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling");
1036 if (IS_ERR(th)) { 1027 if (IS_ERR(th)) {
1037 printk(KERN_ERR "Unable to start the device-polling thread\n"); 1028 printk(KERN_ERR "Unable to start the device-polling thread\n");
1038 quiesce_and_remove_host(dev); 1029 quiesce_and_remove_host(dev);
1039 err = PTR_ERR(th); 1030 err = PTR_ERR(th);
1040 goto errout; 1031 goto errout;
1041 } 1032 }
1042 1033 dev->polling_thread = th;
1043 /* Take a reference to the host for the polling thread and
1044 * count it among all the threads we have launched. Then
1045 * start it up. */
1046 scsi_host_get(rtsx_to_host(dev));
1047 atomic_inc(&total_threads);
1048 wake_up_process(th);
1049 1034
1050 pci_set_drvdata(pci, dev); 1035 pci_set_drvdata(pci, dev);
1051 1036
@@ -1108,16 +1093,6 @@ static void __exit rtsx_exit(void)
1108 1093
1109 pci_unregister_driver(&driver); 1094 pci_unregister_driver(&driver);
1110 1095
1111 /* Don't return until all of our control and scanning threads
1112 * have exited. Since each thread signals threads_gone as its
1113 * last act, we have to call wait_for_completion the right number
1114 * of times.
1115 */
1116 while (atomic_read(&total_threads) > 0) {
1117 wait_for_completion(&threads_gone);
1118 atomic_dec(&total_threads);
1119 }
1120
1121 printk(KERN_INFO "%s module exit\n", CR_DRIVER_NAME); 1096 printk(KERN_INFO "%s module exit\n", CR_DRIVER_NAME);
1122} 1097}
1123 1098
diff --git a/drivers/staging/rts_pstor/rtsx.h b/drivers/staging/rts_pstor/rtsx.h
index 247615ba1d2a..86e47c2e3e3c 100644
--- a/drivers/staging/rts_pstor/rtsx.h
+++ b/drivers/staging/rts_pstor/rtsx.h
@@ -112,9 +112,16 @@ struct rtsx_dev {
112 /* locks */ 112 /* locks */
113 spinlock_t reg_lock; 113 spinlock_t reg_lock;
114 114
115 struct task_struct *ctl_thread; /* the control thread */
116 struct task_struct *polling_thread; /* the polling thread */
117
115 /* mutual exclusion and synchronization structures */ 118 /* mutual exclusion and synchronization structures */
116 struct semaphore sema; /* to sleep thread on */ 119 struct completion cmnd_ready; /* to sleep thread on */
120 struct completion control_exit; /* control thread exit */
121 struct completion polling_exit; /* polling thread exit */
117 struct completion notify; /* thread begin/end */ 122 struct completion notify; /* thread begin/end */
123 struct completion scanning_done; /* wait for scan thread */
124
118 wait_queue_head_t delay_wait; /* wait during scan, reset */ 125 wait_queue_head_t delay_wait; /* wait during scan, reset */
119 struct mutex dev_mutex; 126 struct mutex dev_mutex;
120 127
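
The rts_pstor changes above replace the driver's single semaphore and global thread accounting with per-purpose completions: one to wake the control thread, one per thread to signal exit. A condensed sketch of that pattern, with hypothetical names and the error handling trimmed:

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>

struct example_dev {
        struct completion cmnd_ready;   /* wakes the control thread */
        struct completion control_exit; /* control thread has exited */
};

static int example_control_thread(void *__dev)
{
        struct example_dev *dev = __dev;

        for (;;) {
                if (wait_for_completion_interruptible(&dev->cmnd_ready))
                        break;          /* asked to stop */
                /* ... handle one queued command ... */
        }
        complete_and_exit(&dev->control_exit, 0);
}

static int example_start(struct example_dev *dev)
{
        struct task_struct *th;

        init_completion(&dev->cmnd_ready);
        init_completion(&dev->control_exit);
        th = kthread_run(example_control_thread, dev, "example-ctl");
        return IS_ERR(th) ? PTR_ERR(th) : 0;
}
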
diff --git a/drivers/staging/solo6x10/core.c b/drivers/staging/solo6x10/core.c
index 76779949f141..f974f6412ad7 100644
--- a/drivers/staging/solo6x10/core.c
+++ b/drivers/staging/solo6x10/core.c
@@ -21,6 +21,7 @@
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/slab.h>
24#include <linux/videodev2.h> 25#include <linux/videodev2.h>
25#include "solo6x10.h" 26#include "solo6x10.h"
26#include "tw28.h" 27#include "tw28.h"
diff --git a/drivers/staging/solo6x10/enc.c b/drivers/staging/solo6x10/enc.c
index 285f7f350062..de502599bb19 100644
--- a/drivers/staging/solo6x10/enc.c
+++ b/drivers/staging/solo6x10/enc.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/slab.h>
21#include "solo6x10.h" 22#include "solo6x10.h"
22#include "osd-font.h" 23#include "osd-font.h"
23 24
diff --git a/drivers/staging/solo6x10/g723.c b/drivers/staging/solo6x10/g723.c
index bd8eb92c94b1..59274bfca95b 100644
--- a/drivers/staging/solo6x10/g723.c
+++ b/drivers/staging/solo6x10/g723.c
@@ -21,6 +21,7 @@
21#include <linux/mempool.h> 21#include <linux/mempool.h>
22#include <linux/poll.h> 22#include <linux/poll.h>
23#include <linux/kthread.h> 23#include <linux/kthread.h>
24#include <linux/slab.h>
24#include <linux/freezer.h> 25#include <linux/freezer.h>
25#include <sound/core.h> 26#include <sound/core.h>
26#include <sound/initval.h> 27#include <sound/initval.h>
diff --git a/drivers/staging/solo6x10/p2m.c b/drivers/staging/solo6x10/p2m.c
index 5717eabb04a4..56210f0fc5ec 100644
--- a/drivers/staging/solo6x10/p2m.c
+++ b/drivers/staging/solo6x10/p2m.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/slab.h>
21#include <linux/scatterlist.h> 22#include <linux/scatterlist.h>
22#include "solo6x10.h" 23#include "solo6x10.h"
23 24
diff --git a/drivers/staging/solo6x10/solo6x10.h b/drivers/staging/solo6x10/solo6x10.h
index 17c06bd6cc91..abee7213202f 100644
--- a/drivers/staging/solo6x10/solo6x10.h
+++ b/drivers/staging/solo6x10/solo6x10.h
@@ -28,6 +28,7 @@
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/wait.h> 29#include <linux/wait.h>
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/slab.h>
31#include <asm/io.h> 32#include <asm/io.h>
32#include <linux/atomic.h> 33#include <linux/atomic.h>
33#include <linux/videodev2.h> 34#include <linux/videodev2.h>
diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c
index 39dc586fc8bb..940769ef883f 100644
--- a/drivers/staging/speakup/devsynth.c
+++ b/drivers/staging/speakup/devsynth.c
@@ -18,13 +18,14 @@ static ssize_t speakup_file_write(struct file *fp, const char *buffer,
18{ 18{
19 size_t count = nbytes; 19 size_t count = nbytes;
20 const char *ptr = buffer; 20 const char *ptr = buffer;
21 int bytes; 21 size_t bytes;
22 unsigned long flags; 22 unsigned long flags;
23 u_char buf[256]; 23 u_char buf[256];
24
24 if (synth == NULL) 25 if (synth == NULL)
25 return -ENODEV; 26 return -ENODEV;
26 while (count > 0) { 27 while (count > 0) {
27 bytes = min_t(size_t, count, sizeof(buf)); 28 bytes = min(count, sizeof(buf));
28 if (copy_from_user(buf, ptr, bytes)) 29 if (copy_from_user(buf, ptr, bytes))
29 return -EFAULT; 30 return -EFAULT;
30 count -= bytes; 31 count -= bytes;
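
The speakup change above works because 'bytes' is now a size_t like 'count', so the type-checked min() macro applies without a min_t() cast. In isolation:

#include <linux/kernel.h>

/* Both arguments are size_t, so plain min() compiles cleanly. */
static size_t example_chunk(size_t count, size_t bufsize)
{
        return min(count, bufsize);
}
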
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 589a0554332e..3d1279c424a8 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -209,7 +209,6 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
209 break; 209 break;
210#ifdef CONFIG_OMAP_MCBSP 210#ifdef CONFIG_OMAP_MCBSP
211 case MCBSP_CLK: 211 case MCBSP_CLK:
212 omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO);
213 omap_mcbsp_request(MCBSP_ID(clk_id)); 212 omap_mcbsp_request(MCBSP_ID(clk_id));
214 omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC); 213 omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC);
215 break; 214 break;
diff --git a/drivers/staging/zcache/Makefile b/drivers/staging/zcache/Makefile
index f5ec64f94470..60daa272c204 100644
--- a/drivers/staging/zcache/Makefile
+++ b/drivers/staging/zcache/Makefile
@@ -1,3 +1,3 @@
1zcache-y := tmem.o 1zcache-y := zcache-main.o tmem.o
2 2
3obj-$(CONFIG_ZCACHE) += zcache.o 3obj-$(CONFIG_ZCACHE) += zcache.o
diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
index 975e34bcd722..1ca66ea9b281 100644
--- a/drivers/staging/zcache/tmem.c
+++ b/drivers/staging/zcache/tmem.c
@@ -604,7 +604,7 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
604 struct tmem_obj *obj; 604 struct tmem_obj *obj;
605 void *pampd; 605 void *pampd;
606 bool ephemeral = is_ephemeral(pool); 606 bool ephemeral = is_ephemeral(pool);
607 uint32_t ret = -1; 607 int ret = -1;
608 struct tmem_hashbucket *hb; 608 struct tmem_hashbucket *hb;
609 bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral); 609 bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
610 bool lock_held = false; 610 bool lock_held = false;
diff --git a/drivers/staging/zcache/zcache.c b/drivers/staging/zcache/zcache-main.c
index 65a81a0d7c49..462fbc20561f 100644
--- a/drivers/staging/zcache/zcache.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -19,6 +19,7 @@
19 * http://marc.info/?l=linux-mm&m=127811271605009 19 * http://marc.info/?l=linux-mm&m=127811271605009
20 */ 20 */
21 21
22#include <linux/module.h>
22#include <linux/cpu.h> 23#include <linux/cpu.h>
23#include <linux/highmem.h> 24#include <linux/highmem.h>
24#include <linux/list.h> 25#include <linux/list.h>
@@ -27,6 +28,7 @@
27#include <linux/spinlock.h> 28#include <linux/spinlock.h>
28#include <linux/types.h> 29#include <linux/types.h>
29#include <linux/atomic.h> 30#include <linux/atomic.h>
31#include <linux/math64.h>
30#include "tmem.h" 32#include "tmem.h"
31 33
32#include "../zram/xvmalloc.h" /* if built in drivers/staging */ 34#include "../zram/xvmalloc.h" /* if built in drivers/staging */
@@ -53,6 +55,9 @@
53 55
54#define MAX_CLIENTS 16 56#define MAX_CLIENTS 16
55#define LOCAL_CLIENT ((uint16_t)-1) 57#define LOCAL_CLIENT ((uint16_t)-1)
58
59MODULE_LICENSE("GPL");
60
56struct zcache_client { 61struct zcache_client {
57 struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT]; 62 struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
58 struct xv_pool *xvpool; 63 struct xv_pool *xvpool;
@@ -1153,11 +1158,12 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
1153 size_t clen; 1158 size_t clen;
1154 int ret; 1159 int ret;
1155 unsigned long count; 1160 unsigned long count;
1156 struct page *page = virt_to_page(data); 1161 struct page *page = (struct page *)(data);
1157 struct zcache_client *cli = pool->client; 1162 struct zcache_client *cli = pool->client;
1158 uint16_t client_id = get_client_id_from_client(cli); 1163 uint16_t client_id = get_client_id_from_client(cli);
1159 unsigned long zv_mean_zsize; 1164 unsigned long zv_mean_zsize;
1160 unsigned long curr_pers_pampd_count; 1165 unsigned long curr_pers_pampd_count;
1166 u64 total_zsize;
1161 1167
1162 if (eph) { 1168 if (eph) {
1163 ret = zcache_compress(page, &cdata, &clen); 1169 ret = zcache_compress(page, &cdata, &clen);
@@ -1190,8 +1196,9 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
1190 } 1196 }
1191 /* reject if mean compression is too poor */ 1197 /* reject if mean compression is too poor */
1192 if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) { 1198 if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
1193 zv_mean_zsize = xv_get_total_size_bytes(cli->xvpool) / 1199 total_zsize = xv_get_total_size_bytes(cli->xvpool);
1194 curr_pers_pampd_count; 1200 zv_mean_zsize = div_u64(total_zsize,
1201 curr_pers_pampd_count);
1195 if (zv_mean_zsize > zv_max_mean_zsize) { 1202 if (zv_mean_zsize > zv_max_mean_zsize) {
1196 zcache_mean_compress_poor++; 1203 zcache_mean_compress_poor++;
1197 goto out; 1204 goto out;
@@ -1220,7 +1227,7 @@ static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
1220 int ret = 0; 1227 int ret = 0;
1221 1228
1222 BUG_ON(is_ephemeral(pool)); 1229 BUG_ON(is_ephemeral(pool));
1223 zv_decompress(virt_to_page(data), pampd); 1230 zv_decompress((struct page *)(data), pampd);
1224 return ret; 1231 return ret;
1225} 1232}
1226 1233
@@ -1235,7 +1242,7 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
1235 int ret = 0; 1242 int ret = 0;
1236 1243
1237 BUG_ON(!is_ephemeral(pool)); 1244 BUG_ON(!is_ephemeral(pool));
1238 zbud_decompress(virt_to_page(data), pampd); 1245 zbud_decompress((struct page *)(data), pampd);
1239 zbud_free_and_delist((struct zbud_hdr *)pampd); 1246 zbud_free_and_delist((struct zbud_hdr *)pampd);
1240 atomic_dec(&zcache_curr_eph_pampd_count); 1247 atomic_dec(&zcache_curr_eph_pampd_count);
1241 return ret; 1248 return ret;
@@ -1532,7 +1539,7 @@ static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1532 goto out; 1539 goto out;
1533 if (!zcache_freeze && zcache_do_preload(pool) == 0) { 1540 if (!zcache_freeze && zcache_do_preload(pool) == 0) {
1534 /* preload does preempt_disable on success */ 1541 /* preload does preempt_disable on success */
1535 ret = tmem_put(pool, oidp, index, page_address(page), 1542 ret = tmem_put(pool, oidp, index, (char *)(page),
1536 PAGE_SIZE, 0, is_ephemeral(pool)); 1543 PAGE_SIZE, 0, is_ephemeral(pool));
1537 if (ret < 0) { 1544 if (ret < 0) {
1538 if (is_ephemeral(pool)) 1545 if (is_ephemeral(pool))
@@ -1565,7 +1572,7 @@ static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1565 pool = zcache_get_pool_by_id(cli_id, pool_id); 1572 pool = zcache_get_pool_by_id(cli_id, pool_id);
1566 if (likely(pool != NULL)) { 1573 if (likely(pool != NULL)) {
1567 if (atomic_read(&pool->obj_count) > 0) 1574 if (atomic_read(&pool->obj_count) > 0)
1568 ret = tmem_get(pool, oidp, index, page_address(page), 1575 ret = tmem_get(pool, oidp, index, (char *)(page),
1569 &size, 0, is_ephemeral(pool)); 1576 &size, 0, is_ephemeral(pool));
1570 zcache_put_pool(pool); 1577 zcache_put_pool(pool);
1571 } 1578 }
@@ -1929,9 +1936,9 @@ __setup("nofrontswap", no_frontswap);
1929 1936
1930static int __init zcache_init(void) 1937static int __init zcache_init(void)
1931{ 1938{
1932#ifdef CONFIG_SYSFS
1933 int ret = 0; 1939 int ret = 0;
1934 1940
1941#ifdef CONFIG_SYSFS
1935 ret = sysfs_create_group(mm_kobj, &zcache_attr_group); 1942 ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
1936 if (ret) { 1943 if (ret) {
1937 pr_err("zcache: can't create sysfs\n"); 1944 pr_err("zcache: can't create sysfs\n");
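
One detail worth noting from the zcache hunks above: the mean-compression check now divides through div_u64(), since a plain '/' on a u64 would pull __udivdi3 into 32-bit builds, which the kernel does not link against. A minimal sketch, with made-up names:

#include <linux/math64.h>
#include <linux/types.h>

static u32 example_mean_zsize(u64 total_zsize, u32 count)
{
        /* div_u64() divides a u64 by a 32-bit divisor on any arch */
        return count ? (u32)div_u64(total_zsize, count) : 0;
}
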
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index c24fb10de60b..6a4ea29c2f36 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -2243,7 +2243,6 @@ static int iscsit_handle_snack(
2243 case 0: 2243 case 0:
2244 return iscsit_handle_recovery_datain_or_r2t(conn, buf, 2244 return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2245 hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); 2245 hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
2246 return 0;
2247 case ISCSI_FLAG_SNACK_TYPE_STATUS: 2246 case ISCSI_FLAG_SNACK_TYPE_STATUS:
2248 return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, 2247 return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
2249 hdr->begrun, hdr->runlength); 2248 hdr->begrun, hdr->runlength);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index f095e65b1ccf..f1643dbf6a92 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -268,7 +268,7 @@ struct se_tpg_np *lio_target_call_addnptotpg(
268 ISCSI_TCP); 268 ISCSI_TCP);
269 if (IS_ERR(tpg_np)) { 269 if (IS_ERR(tpg_np)) {
270 iscsit_put_tpg(tpg); 270 iscsit_put_tpg(tpg);
271 return ERR_PTR(PTR_ERR(tpg_np)); 271 return ERR_CAST(tpg_np);
272 } 272 }
273 pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); 273 pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
274 274
@@ -1285,7 +1285,7 @@ struct se_wwn *lio_target_call_coreaddtiqn(
1285 1285
1286 tiqn = iscsit_add_tiqn((unsigned char *)name); 1286 tiqn = iscsit_add_tiqn((unsigned char *)name);
1287 if (IS_ERR(tiqn)) 1287 if (IS_ERR(tiqn))
1288 return ERR_PTR(PTR_ERR(tiqn)); 1288 return ERR_CAST(tiqn);
1289 /* 1289 /*
1290 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group. 1290 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
1291 */ 1291 */
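
ERR_CAST() used above is shorthand for forwarding an error pointer of one type as another, which ERR_PTR(PTR_ERR(...)) did more verbosely. A tiny sketch with hypothetical types:

#include <linux/err.h>

struct widget;          /* hypothetical inner object */
struct wrapper;         /* hypothetical outer object */

static struct wrapper *example_wrap(struct widget *w)
{
        if (IS_ERR(w))
                return ERR_CAST(w);     /* forward -ENOMEM, -EINVAL, ... unchanged */
        /* ... allocate and return the real wrapper here ... */
        return NULL;
}
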
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 980650792cf6..c4c68da3e500 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -834,7 +834,7 @@ static int iscsit_attach_ooo_cmdsn(
834 */ 834 */
835 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, 835 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
836 ooo_list) { 836 ooo_list) {
837 while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) 837 if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
838 continue; 838 continue;
839 839
840 list_add(&ooo_cmdsn->ooo_list, 840 list_add(&ooo_cmdsn->ooo_list,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bcaf82f47037..daad362a93ce 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1013,19 +1013,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1013 ISCSI_LOGIN_STATUS_TARGET_ERROR); 1013 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1014 goto new_sess_out; 1014 goto new_sess_out;
1015 } 1015 }
1016#if 0 1016 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
1017 if (!iscsi_ntop6((const unsigned char *) 1017 &sock_in6.sin6_addr.in6_u);
1018 &sock_in6.sin6_addr.in6_u, 1018 conn->login_port = ntohs(sock_in6.sin6_port);
1019 (char *)&conn->ipv6_login_ip[0],
1020 IPV6_ADDRESS_SPACE)) {
1021 pr_err("iscsi_ntop6() failed\n");
1022 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1023 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1024 goto new_sess_out;
1025 }
1026#else
1027 pr_debug("Skipping iscsi_ntop6()\n");
1028#endif
1029 } else { 1019 } else {
1030 memset(&sock_in, 0, sizeof(struct sockaddr_in)); 1020 memset(&sock_in, 0, sizeof(struct sockaddr_in));
1031 1021
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 252e246cf51e..5b773160200f 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -545,13 +545,13 @@ int iscsi_copy_param_list(
545 struct iscsi_param_list *src_param_list, 545 struct iscsi_param_list *src_param_list,
546 int leading) 546 int leading)
547{ 547{
548 struct iscsi_param *new_param = NULL, *param = NULL; 548 struct iscsi_param *param = NULL;
549 struct iscsi_param *new_param = NULL;
549 struct iscsi_param_list *param_list = NULL; 550 struct iscsi_param_list *param_list = NULL;
550 551
551 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); 552 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
552 if (!param_list) { 553 if (!param_list) {
553 pr_err("Unable to allocate memory for" 554 pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
554 " struct iscsi_param_list.\n");
555 goto err_out; 555 goto err_out;
556 } 556 }
557 INIT_LIST_HEAD(&param_list->param_list); 557 INIT_LIST_HEAD(&param_list->param_list);
@@ -567,8 +567,17 @@ int iscsi_copy_param_list(
567 567
568 new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL); 568 new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
569 if (!new_param) { 569 if (!new_param) {
570 pr_err("Unable to allocate memory for" 570 pr_err("Unable to allocate memory for struct iscsi_param.\n");
571 " struct iscsi_param.\n"); 571 goto err_out;
572 }
573
574 new_param->name = kstrdup(param->name, GFP_KERNEL);
575 new_param->value = kstrdup(param->value, GFP_KERNEL);
576 if (!new_param->value || !new_param->name) {
577 kfree(new_param->value);
578 kfree(new_param->name);
579 kfree(new_param);
580 pr_err("Unable to allocate memory for parameter name/value.\n");
572 goto err_out; 581 goto err_out;
573 } 582 }
574 583
@@ -580,32 +589,12 @@ int iscsi_copy_param_list(
580 new_param->use = param->use; 589 new_param->use = param->use;
581 new_param->type_range = param->type_range; 590 new_param->type_range = param->type_range;
582 591
583 new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL);
584 if (!new_param->name) {
585 pr_err("Unable to allocate memory for"
586 " parameter name.\n");
587 goto err_out;
588 }
589
590 new_param->value = kzalloc(strlen(param->value) + 1,
591 GFP_KERNEL);
592 if (!new_param->value) {
593 pr_err("Unable to allocate memory for"
594 " parameter value.\n");
595 goto err_out;
596 }
597
598 memcpy(new_param->name, param->name, strlen(param->name));
599 new_param->name[strlen(param->name)] = '\0';
600 memcpy(new_param->value, param->value, strlen(param->value));
601 new_param->value[strlen(param->value)] = '\0';
602
603 list_add_tail(&new_param->p_list, &param_list->param_list); 592 list_add_tail(&new_param->p_list, &param_list->param_list);
604 } 593 }
605 594
606 if (!list_empty(&param_list->param_list)) 595 if (!list_empty(&param_list->param_list)) {
607 *dst_param_list = param_list; 596 *dst_param_list = param_list;
608 else { 597 } else {
609 pr_err("No parameters allocated.\n"); 598 pr_err("No parameters allocated.\n");
610 goto err_out; 599 goto err_out;
611 } 600 }
@@ -1441,7 +1430,7 @@ static int iscsi_enforce_integrity_rules(
1441 u8 DataSequenceInOrder = 0; 1430 u8 DataSequenceInOrder = 0;
1442 u8 ErrorRecoveryLevel = 0, SessionType = 0; 1431 u8 ErrorRecoveryLevel = 0, SessionType = 0;
1443 u8 IFMarker = 0, OFMarker = 0; 1432 u8 IFMarker = 0, OFMarker = 0;
1444 u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0; 1433 u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
1445 u32 FirstBurstLength = 0, MaxBurstLength = 0; 1434 u32 FirstBurstLength = 0, MaxBurstLength = 0;
1446 struct iscsi_param *param = NULL; 1435 struct iscsi_param *param = NULL;
1447 1436
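
The iscsi_copy_param_list() rework above swaps the open-coded kzalloc()+memcpy() string copies for kstrdup() and frees both halves on failure. The same pattern in isolation, with hypothetical names:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_copy_pair(const char *name, const char *value,
                             char **out_name, char **out_value)
{
        *out_name = kstrdup(name, GFP_KERNEL);
        *out_value = kstrdup(value, GFP_KERNEL);
        if (!*out_name || !*out_value) {
                kfree(*out_name);       /* kfree(NULL) is a no-op */
                kfree(*out_value);
                return -ENOMEM;
        }
        return 0;
}
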
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index a1acb0167902..f00137f377b2 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -243,7 +243,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
243 if (!cmd->tmr_req) { 243 if (!cmd->tmr_req) {
244 pr_err("Unable to allocate memory for" 244 pr_err("Unable to allocate memory for"
245 " Task Management command!\n"); 245 " Task Management command!\n");
246 return NULL; 246 goto out;
247 } 247 }
248 /* 248 /*
249 * TASK_REASSIGN for ERL=2 / connection stays inside of 249 * TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -298,8 +298,6 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
298 return cmd; 298 return cmd;
299out: 299out:
300 iscsit_release_cmd(cmd); 300 iscsit_release_cmd(cmd);
301 if (se_cmd)
302 transport_free_se_cmd(se_cmd);
303 return NULL; 301 return NULL;
304} 302}
305 303
@@ -877,40 +875,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)
877} 875}
878 876
879/* 877/*
880 * Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker
881 * array counts needed for sync and steering.
882 */
883static int iscsit_determine_sync_and_steering_counts(
884 struct iscsi_conn *conn,
885 struct iscsi_data_count *count)
886{
887 u32 length = count->data_length;
888 u32 marker, markint;
889
890 count->sync_and_steering = 1;
891
892 marker = (count->type == ISCSI_RX_DATA) ?
893 conn->of_marker : conn->if_marker;
894 markint = (count->type == ISCSI_RX_DATA) ?
895 (conn->conn_ops->OFMarkInt * 4) :
896 (conn->conn_ops->IFMarkInt * 4);
897 count->ss_iov_count = count->iov_count;
898
899 while (length > 0) {
900 if (length >= marker) {
901 count->ss_iov_count += 3;
902 count->ss_marker_count += 2;
903
904 length -= marker;
905 marker = markint;
906 } else
907 length = 0;
908 }
909
910 return 0;
911}
912
913/*
914 * Setup conn->if_marker and conn->of_marker values based upon 878 * Setup conn->if_marker and conn->of_marker values based upon
915 * the initial marker-less interval. (see iSCSI v19 A.2) 879 * the initial marker-less interval. (see iSCSI v19 A.2)
916 */ 880 */
@@ -1292,7 +1256,7 @@ int iscsit_fe_sendpage_sg(
1292 struct kvec iov; 1256 struct kvec iov;
1293 u32 tx_hdr_size, data_len; 1257 u32 tx_hdr_size, data_len;
1294 u32 offset = cmd->first_data_sg_off; 1258 u32 offset = cmd->first_data_sg_off;
1295 int tx_sent; 1259 int tx_sent, iov_off;
1296 1260
1297send_hdr: 1261send_hdr:
1298 tx_hdr_size = ISCSI_HDR_LEN; 1262 tx_hdr_size = ISCSI_HDR_LEN;
@@ -1312,9 +1276,19 @@ send_hdr:
1312 } 1276 }
1313 1277
1314 data_len = cmd->tx_size - tx_hdr_size - cmd->padding; 1278 data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
1315 if (conn->conn_ops->DataDigest) 1279 /*
1280 * Set iov_off used by padding and data digest tx_data() calls below
1281 * in order to determine proper offset into cmd->iov_data[]
1282 */
1283 if (conn->conn_ops->DataDigest) {
1316 data_len -= ISCSI_CRC_LEN; 1284 data_len -= ISCSI_CRC_LEN;
1317 1285 if (cmd->padding)
1286 iov_off = (cmd->iov_data_count - 2);
1287 else
1288 iov_off = (cmd->iov_data_count - 1);
1289 } else {
1290 iov_off = (cmd->iov_data_count - 1);
1291 }
1318 /* 1292 /*
1319 * Perform sendpage() for each page in the scatterlist 1293 * Perform sendpage() for each page in the scatterlist
1320 */ 1294 */
@@ -1343,8 +1317,7 @@ send_pg:
1343 1317
1344send_padding: 1318send_padding:
1345 if (cmd->padding) { 1319 if (cmd->padding) {
1346 struct kvec *iov_p = 1320 struct kvec *iov_p = &cmd->iov_data[iov_off++];
1347 &cmd->iov_data[cmd->iov_data_count-1];
1348 1321
1349 tx_sent = tx_data(conn, iov_p, 1, cmd->padding); 1322 tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
1350 if (cmd->padding != tx_sent) { 1323 if (cmd->padding != tx_sent) {
@@ -1358,8 +1331,7 @@ send_padding:
1358 1331
1359send_datacrc: 1332send_datacrc:
1360 if (conn->conn_ops->DataDigest) { 1333 if (conn->conn_ops->DataDigest) {
1361 struct kvec *iov_d = 1334 struct kvec *iov_d = &cmd->iov_data[iov_off];
1362 &cmd->iov_data[cmd->iov_data_count];
1363 1335
1364 tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN); 1336 tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
1365 if (ISCSI_CRC_LEN != tx_sent) { 1337 if (ISCSI_CRC_LEN != tx_sent) {
@@ -1433,8 +1405,7 @@ static int iscsit_do_rx_data(
1433 struct iscsi_data_count *count) 1405 struct iscsi_data_count *count)
1434{ 1406{
1435 int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len; 1407 int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
1436 u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0; 1408 struct kvec *iov_p;
1437 struct kvec iov[count->ss_iov_count], *iov_p;
1438 struct msghdr msg; 1409 struct msghdr msg;
1439 1410
1440 if (!conn || !conn->sock || !conn->conn_ops) 1411 if (!conn || !conn->sock || !conn->conn_ops)
@@ -1442,93 +1413,8 @@ static int iscsit_do_rx_data(
1442 1413
1443 memset(&msg, 0, sizeof(struct msghdr)); 1414 memset(&msg, 0, sizeof(struct msghdr));
1444 1415
1445 if (count->sync_and_steering) { 1416 iov_p = count->iov;
1446 int size = 0; 1417 iov_len = count->iov_count;
1447 u32 i, orig_iov_count = 0;
1448 u32 orig_iov_len = 0, orig_iov_loc = 0;
1449 u32 iov_count = 0, per_iov_bytes = 0;
1450 u32 *rx_marker, old_rx_marker = 0;
1451 struct kvec *iov_record;
1452
1453 memset(&rx_marker_val, 0,
1454 count->ss_marker_count * sizeof(u32));
1455 memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
1456
1457 iov_record = count->iov;
1458 orig_iov_count = count->iov_count;
1459 rx_marker = &conn->of_marker;
1460
1461 i = 0;
1462 size = data;
1463 orig_iov_len = iov_record[orig_iov_loc].iov_len;
1464 while (size > 0) {
1465 pr_debug("rx_data: #1 orig_iov_len %u,"
1466 " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
1467 pr_debug("rx_data: #2 rx_marker %u, size"
1468 " %u\n", *rx_marker, size);
1469
1470 if (orig_iov_len >= *rx_marker) {
1471 iov[iov_count].iov_len = *rx_marker;
1472 iov[iov_count++].iov_base =
1473 (iov_record[orig_iov_loc].iov_base +
1474 per_iov_bytes);
1475
1476 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1477 iov[iov_count++].iov_base =
1478 &rx_marker_val[rx_marker_iov++];
1479 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1480 iov[iov_count++].iov_base =
1481 &rx_marker_val[rx_marker_iov++];
1482 old_rx_marker = *rx_marker;
1483
1484 /*
1485 * OFMarkInt is in 32-bit words.
1486 */
1487 *rx_marker = (conn->conn_ops->OFMarkInt * 4);
1488 size -= old_rx_marker;
1489 orig_iov_len -= old_rx_marker;
1490 per_iov_bytes += old_rx_marker;
1491
1492 pr_debug("rx_data: #3 new_rx_marker"
1493 " %u, size %u\n", *rx_marker, size);
1494 } else {
1495 iov[iov_count].iov_len = orig_iov_len;
1496 iov[iov_count++].iov_base =
1497 (iov_record[orig_iov_loc].iov_base +
1498 per_iov_bytes);
1499
1500 per_iov_bytes = 0;
1501 *rx_marker -= orig_iov_len;
1502 size -= orig_iov_len;
1503
1504 if (size)
1505 orig_iov_len =
1506 iov_record[++orig_iov_loc].iov_len;
1507
1508 pr_debug("rx_data: #4 new_rx_marker"
1509 " %u, size %u\n", *rx_marker, size);
1510 }
1511 }
1512 data += (rx_marker_iov * (MARKER_SIZE / 2));
1513
1514 iov_p = &iov[0];
1515 iov_len = iov_count;
1516
1517 if (iov_count > count->ss_iov_count) {
1518 pr_err("iov_count: %d, count->ss_iov_count:"
1519 " %d\n", iov_count, count->ss_iov_count);
1520 return -1;
1521 }
1522 if (rx_marker_iov > count->ss_marker_count) {
1523 pr_err("rx_marker_iov: %d, count->ss_marker"
1524 "_count: %d\n", rx_marker_iov,
1525 count->ss_marker_count);
1526 return -1;
1527 }
1528 } else {
1529 iov_p = count->iov;
1530 iov_len = count->iov_count;
1531 }
1532 1418
1533 while (total_rx < data) { 1419 while (total_rx < data) {
1534 rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len, 1420 rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1543,16 +1429,6 @@ static int iscsit_do_rx_data(
1543 rx_loop, total_rx, data); 1429 rx_loop, total_rx, data);
1544 } 1430 }
1545 1431
1546 if (count->sync_and_steering) {
1547 int j;
1548 for (j = 0; j < rx_marker_iov; j++) {
1549 pr_debug("rx_data: #5 j: %d, offset: %d\n",
1550 j, rx_marker_val[j]);
1551 conn->of_marker_offset = rx_marker_val[j];
1552 }
1553 total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
1554 }
1555
1556 return total_rx; 1432 return total_rx;
1557} 1433}
1558 1434
@@ -1561,8 +1437,7 @@ static int iscsit_do_tx_data(
1561 struct iscsi_data_count *count) 1437 struct iscsi_data_count *count)
1562{ 1438{
1563 int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; 1439 int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
1564 u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0; 1440 struct kvec *iov_p;
1565 struct kvec iov[count->ss_iov_count], *iov_p;
1566 struct msghdr msg; 1441 struct msghdr msg;
1567 1442
1568 if (!conn || !conn->sock || !conn->conn_ops) 1443 if (!conn || !conn->sock || !conn->conn_ops)
@@ -1575,98 +1450,8 @@ static int iscsit_do_tx_data(
1575 1450
1576 memset(&msg, 0, sizeof(struct msghdr)); 1451 memset(&msg, 0, sizeof(struct msghdr));
1577 1452
1578 if (count->sync_and_steering) { 1453 iov_p = count->iov;
1579 int size = 0; 1454 iov_len = count->iov_count;
1580 u32 i, orig_iov_count = 0;
1581 u32 orig_iov_len = 0, orig_iov_loc = 0;
1582 u32 iov_count = 0, per_iov_bytes = 0;
1583 u32 *tx_marker, old_tx_marker = 0;
1584 struct kvec *iov_record;
1585
1586 memset(&tx_marker_val, 0,
1587 count->ss_marker_count * sizeof(u32));
1588 memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
1589
1590 iov_record = count->iov;
1591 orig_iov_count = count->iov_count;
1592 tx_marker = &conn->if_marker;
1593
1594 i = 0;
1595 size = data;
1596 orig_iov_len = iov_record[orig_iov_loc].iov_len;
1597 while (size > 0) {
1598 pr_debug("tx_data: #1 orig_iov_len %u,"
1599 " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
1600 pr_debug("tx_data: #2 tx_marker %u, size"
1601 " %u\n", *tx_marker, size);
1602
1603 if (orig_iov_len >= *tx_marker) {
1604 iov[iov_count].iov_len = *tx_marker;
1605 iov[iov_count++].iov_base =
1606 (iov_record[orig_iov_loc].iov_base +
1607 per_iov_bytes);
1608
1609 tx_marker_val[tx_marker_iov] =
1610 (size - *tx_marker);
1611 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1612 iov[iov_count++].iov_base =
1613 &tx_marker_val[tx_marker_iov++];
1614 iov[iov_count].iov_len = (MARKER_SIZE / 2);
1615 iov[iov_count++].iov_base =
1616 &tx_marker_val[tx_marker_iov++];
1617 old_tx_marker = *tx_marker;
1618
1619 /*
1620 * IFMarkInt is in 32-bit words.
1621 */
1622 *tx_marker = (conn->conn_ops->IFMarkInt * 4);
1623 size -= old_tx_marker;
1624 orig_iov_len -= old_tx_marker;
1625 per_iov_bytes += old_tx_marker;
1626
1627 pr_debug("tx_data: #3 new_tx_marker"
1628 " %u, size %u\n", *tx_marker, size);
1629 pr_debug("tx_data: #4 offset %u\n",
1630 tx_marker_val[tx_marker_iov-1]);
1631 } else {
1632 iov[iov_count].iov_len = orig_iov_len;
1633 iov[iov_count++].iov_base
1634 = (iov_record[orig_iov_loc].iov_base +
1635 per_iov_bytes);
1636
1637 per_iov_bytes = 0;
1638 *tx_marker -= orig_iov_len;
1639 size -= orig_iov_len;
1640
1641 if (size)
1642 orig_iov_len =
1643 iov_record[++orig_iov_loc].iov_len;
1644
1645 pr_debug("tx_data: #5 new_tx_marker"
1646 " %u, size %u\n", *tx_marker, size);
1647 }
1648 }
1649
1650 data += (tx_marker_iov * (MARKER_SIZE / 2));
1651
1652 iov_p = &iov[0];
1653 iov_len = iov_count;
1654
1655 if (iov_count > count->ss_iov_count) {
1656 pr_err("iov_count: %d, count->ss_iov_count:"
1657 " %d\n", iov_count, count->ss_iov_count);
1658 return -1;
1659 }
1660 if (tx_marker_iov > count->ss_marker_count) {
1661 pr_err("tx_marker_iov: %d, count->ss_marker"
1662 "_count: %d\n", tx_marker_iov,
1663 count->ss_marker_count);
1664 return -1;
1665 }
1666 } else {
1667 iov_p = count->iov;
1668 iov_len = count->iov_count;
1669 }
1670 1455
1671 while (total_tx < data) { 1456 while (total_tx < data) {
1672 tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, 1457 tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1681,9 +1466,6 @@ static int iscsit_do_tx_data(
1681 tx_loop, total_tx, data); 1466 tx_loop, total_tx, data);
1682 } 1467 }
1683 1468
1684 if (count->sync_and_steering)
1685 total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
1686
1687 return total_tx; 1469 return total_tx;
1688} 1470}
1689 1471
@@ -1704,12 +1486,6 @@ int rx_data(
1704 c.data_length = data; 1486 c.data_length = data;
1705 c.type = ISCSI_RX_DATA; 1487 c.type = ISCSI_RX_DATA;
1706 1488
1707 if (conn->conn_ops->OFMarker &&
1708 (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
1709 if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
1710 return -1;
1711 }
1712
1713 return iscsit_do_rx_data(conn, &c); 1489 return iscsit_do_rx_data(conn, &c);
1714} 1490}
1715 1491
@@ -1730,12 +1506,6 @@ int tx_data(
1730 c.data_length = data; 1506 c.data_length = data;
1731 c.type = ISCSI_TX_DATA; 1507 c.type = ISCSI_TX_DATA;
1732 1508
1733 if (conn->conn_ops->IFMarker &&
1734 (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
1735 if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
1736 return -1;
1737 }
1738
1739 return iscsit_do_tx_data(conn, &c); 1509 return iscsit_do_tx_data(conn, &c);
1740} 1510}
1741 1511
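The hunks above drop the sync-and-steering marker handling from the iSCSI data paths: rx_data()/tx_data() no longer compute marker counts, and iscsit_do_tx_data() now simply sends the caller's kvec array. For orientation, the removed path converted the negotiated IFMarkInt/OFMarkInt values (expressed in 32-bit words) into a byte interval and inserted an 8-byte marker, split across two 4-byte kvec entries, each time that interval was consumed. Below is a minimal user-space sketch of that arithmetic only; the interval and payload values are invented for illustration.

#include <stdio.h>

#define MARKER_SIZE 8	/* 8-byte marker, sent as two 4-byte halves */

int main(void)
{
	unsigned int if_mark_int = 2048;	/* negotiated IFMarkInt in 32-bit words (illustrative) */
	unsigned int interval = if_mark_int * 4;	/* same conversion as *tx_marker = IFMarkInt * 4 */
	unsigned int payload = 65536;		/* bytes handed to tx_data() (illustrative) */
	unsigned int markers = payload / interval;	/* markers inserted while walking the payload */
	unsigned int wire_len = payload + markers * MARKER_SIZE;

	printf("interval %u bytes, %u markers, %u bytes on the wire\n",
	       interval, markers, wire_len);
	return 0;
}

The final accounting mirrors the removed total_tx -= tx_marker_iov * (MARKER_SIZE / 2) step, which subtracted the marker bytes again so callers only ever saw the SCSI payload length.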
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 8ae09a1bdf74..f04d4ef99dca 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/ctype.h>
27#include <asm/unaligned.h> 28#include <asm/unaligned.h>
28#include <scsi/scsi.h> 29#include <scsi/scsi.h>
29 30
@@ -67,6 +68,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
67{ 68{
68 struct se_lun *lun = cmd->se_lun; 69 struct se_lun *lun = cmd->se_lun;
69 struct se_device *dev = cmd->se_dev; 70 struct se_device *dev = cmd->se_dev;
71 struct se_portal_group *tpg = lun->lun_sep->sep_tpg;
70 unsigned char *buf; 72 unsigned char *buf;
71 73
72 /* 74 /*
@@ -81,9 +83,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
81 83
82 buf = transport_kmap_first_data_page(cmd); 84 buf = transport_kmap_first_data_page(cmd);
83 85
84 buf[0] = dev->transport->get_device_type(dev); 86 if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
85 if (buf[0] == TYPE_TAPE) 87 buf[0] = 0x3f; /* Not connected */
86 buf[1] = 0x80; 88 } else {
89 buf[0] = dev->transport->get_device_type(dev);
90 if (buf[0] == TYPE_TAPE)
91 buf[1] = 0x80;
92 }
87 buf[2] = dev->transport->get_device_rev(dev); 93 buf[2] = dev->transport->get_device_rev(dev);
88 94
89 /* 95 /*
@@ -149,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
149 return 0; 155 return 0;
150} 156}
151 157
158static void
159target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off)
160{
161 unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
162 unsigned char *buf = buf_off;
163 int cnt = 0, next = 1;
164 /*
165 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
166 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
167 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
168 * to complete the payload. These are based from VPD=0x80 PRODUCT SERIAL
169 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
170 * per device uniqueness.
171 */
172 while (*p != '\0') {
173 if (cnt >= 13)
174 break;
175 if (!isxdigit(*p)) {
176 p++;
177 continue;
178 }
179 if (next != 0) {
180 buf[cnt++] |= hex_to_bin(*p++);
181 next = 0;
182 } else {
183 buf[cnt] = hex_to_bin(*p++) << 4;
184 next = 1;
185 }
186 }
187}
188
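The helper added above walks the VPD 0x80 unit serial string, skips non-hex characters, and packs successive hex digits into alternating low/high nibbles so that up to 25 nibbles (the 36-bit VENDOR SPECIFIC IDENTIFIER plus the 64-bit extension) land behind the NAA 6h nibble. A user-space sketch of the same packing, with hex_to_bin() stood in by a local helper and an invented serial string; byte 0 is assumed to already carry the 0x6 NAA nibble in its high half, which is why the first digit is OR'd in rather than assigned.

#include <ctype.h>
#include <stdio.h>

/* user-space stand-in for the kernel's hex_to_bin() */
static int hex_to_bin(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	return tolower((unsigned char)ch) - 'a' + 10;
}

int main(void)
{
	/* illustrative unit serial; real values come from vpd_unit_serial in configfs */
	const char *serial = "a427a925-bc3d-4dcc-ab33-1e5b1e4f3cb7";
	unsigned char buf[16] = { 0x60 };	/* byte 0 high nibble: NAA 6h designator (assumed pre-set) */
	const char *p = serial;
	int cnt = 0, next = 1, i;

	while (*p != '\0' && cnt < 13) {
		if (!isxdigit((unsigned char)*p)) {	/* skip '-' and other separators */
			p++;
			continue;
		}
		if (next) {				/* low nibble of the current byte */
			buf[cnt++] |= hex_to_bin(*p++);
			next = 0;
		} else {				/* high nibble of the next byte */
			buf[cnt] = hex_to_bin(*p++) << 4;
			next = 1;
		}
	}

	for (i = 0; i < 13; i++)
		printf("%02x", buf[i]);
	printf("\n");
	return 0;
}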
152/* 189/*
153 * Device identification VPD, for a complete list of 190 * Device identification VPD, for a complete list of
154 * DESIGNATOR TYPEs see spc4r17 Table 459. 191 * DESIGNATOR TYPEs see spc4r17 Table 459.
@@ -214,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
214 * VENDOR_SPECIFIC_IDENTIFIER and 251 * VENDOR_SPECIFIC_IDENTIFIER and
215 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION 252 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
216 */ 253 */
217 buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]); 254 target_parse_naa_6h_vendor_specific(dev, &buf[off]);
218 hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
219 255
220 len = 20; 256 len = 20;
221 off = (len + 4); 257 off = (len + 4);
@@ -915,8 +951,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
915 length += target_modesense_control(dev, &buf[offset+length]); 951 length += target_modesense_control(dev, &buf[offset+length]);
916 break; 952 break;
917 default: 953 default:
918 pr_err("Got Unknown Mode Page: 0x%02x\n", 954 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
919 cdb[2] & 0x3f); 955 cdb[2] & 0x3f, cdb[3]);
920 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; 956 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
921 } 957 }
922 offset += length; 958 offset += length;
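The reworked error message above reports both the page and subpage codes: the page code is the low six bits of CDB byte 2 (the top two bits are the PC field) and the subpage code is byte 3. A small sketch of that decoding on an illustrative MODE SENSE CDB:

#include <stdio.h>

int main(void)
{
	/* illustrative CDB bytes; only bytes 2 and 3 matter for the message */
	unsigned char cdb[10] = { 0x5a, 0x00, 0x9c, 0x01 };	/* PC=10b, page 0x1c, subpage 0x01 */
	unsigned char page = cdb[2] & 0x3f;	/* mask off the PC field */
	unsigned char subpage = cdb[3];

	printf("page/subpage: 0x%02x/0x%02x\n", page, subpage);
	return 0;
}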
@@ -1072,8 +1108,6 @@ target_emulate_unmap(struct se_task *task)
1072 size -= 16; 1108 size -= 16;
1073 } 1109 }
1074 1110
1075 task->task_scsi_status = GOOD;
1076 transport_complete_task(task, 1);
1077err: 1111err:
1078 transport_kunmap_first_data_page(cmd); 1112 transport_kunmap_first_data_page(cmd);
1079 1113
@@ -1085,24 +1119,17 @@ err:
1085 * Note this is not used for TCM/pSCSI passthrough 1119 * Note this is not used for TCM/pSCSI passthrough
1086 */ 1120 */
1087static int 1121static int
1088target_emulate_write_same(struct se_task *task, int write_same32) 1122target_emulate_write_same(struct se_task *task, u32 num_blocks)
1089{ 1123{
1090 struct se_cmd *cmd = task->task_se_cmd; 1124 struct se_cmd *cmd = task->task_se_cmd;
1091 struct se_device *dev = cmd->se_dev; 1125 struct se_device *dev = cmd->se_dev;
1092 sector_t range; 1126 sector_t range;
1093 sector_t lba = cmd->t_task_lba; 1127 sector_t lba = cmd->t_task_lba;
1094 unsigned int num_blocks;
1095 int ret; 1128 int ret;
1096 /* 1129 /*
1097 * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explict 1130 * Use the explicit range when non zero is supplied, otherwise calculate
1098 * range when non zero is supplied, otherwise calculate the remaining 1131 * the remaining range based on ->get_blocks() - starting LBA.
1099 * range based on ->get_blocks() - starting LBA.
1100 */ 1132 */
1101 if (write_same32)
1102 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
1103 else
1104 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
1105
1106 if (num_blocks != 0) 1133 if (num_blocks != 0)
1107 range = num_blocks; 1134 range = num_blocks;
1108 else 1135 else
@@ -1117,8 +1144,6 @@ target_emulate_write_same(struct se_task *task, int write_same32)
1117 return ret; 1144 return ret;
1118 } 1145 }
1119 1146
1120 task->task_scsi_status = GOOD;
1121 transport_complete_task(task, 1);
1122 return 0; 1147 return 0;
1123} 1148}
1124 1149
@@ -1165,13 +1190,23 @@ transport_emulate_control_cdb(struct se_task *task)
1165 } 1190 }
1166 ret = target_emulate_unmap(task); 1191 ret = target_emulate_unmap(task);
1167 break; 1192 break;
1193 case WRITE_SAME:
1194 if (!dev->transport->do_discard) {
1195 pr_err("WRITE_SAME emulation not supported"
1196 " for: %s\n", dev->transport->name);
1197 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1198 }
1199 ret = target_emulate_write_same(task,
1200 get_unaligned_be16(&cmd->t_task_cdb[7]));
1201 break;
1168 case WRITE_SAME_16: 1202 case WRITE_SAME_16:
1169 if (!dev->transport->do_discard) { 1203 if (!dev->transport->do_discard) {
1170 pr_err("WRITE_SAME_16 emulation not supported" 1204 pr_err("WRITE_SAME_16 emulation not supported"
1171 " for: %s\n", dev->transport->name); 1205 " for: %s\n", dev->transport->name);
1172 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1206 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1173 } 1207 }
1174 ret = target_emulate_write_same(task, 0); 1208 ret = target_emulate_write_same(task,
1209 get_unaligned_be32(&cmd->t_task_cdb[10]));
1175 break; 1210 break;
1176 case VARIABLE_LENGTH_CMD: 1211 case VARIABLE_LENGTH_CMD:
1177 service_action = 1212 service_action =
@@ -1184,7 +1219,8 @@ transport_emulate_control_cdb(struct se_task *task)
1184 dev->transport->name); 1219 dev->transport->name);
1185 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1220 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1186 } 1221 }
1187 ret = target_emulate_write_same(task, 1); 1222 ret = target_emulate_write_same(task,
1223 get_unaligned_be32(&cmd->t_task_cdb[28]));
1188 break; 1224 break;
1189 default: 1225 default:
1190 pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" 1226 pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
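With the emulation helper now taking the block count as a parameter, each opcode's caller pulls NUMBER OF LOGICAL BLOCKS from its own CDB offset: a big-endian 16-bit field at byte 7 for WRITE_SAME, a 32-bit field at byte 10 for WRITE_SAME_16, and a 32-bit field at byte 28 for the WRITE_SAME_32 service action. A stand-alone sketch of those extractions, with the kernel's get_unaligned_be*() helpers replaced by local ones and the CDB contents invented:

#include <stdint.h>
#include <stdio.h>

/* user-space stand-ins for get_unaligned_be16()/get_unaligned_be32() */
static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	/* illustrative CDBs: only the NUMBER OF LOGICAL BLOCKS fields are filled in */
	uint8_t ws10[10] = { 0 }, ws16[16] = { 0 }, ws32[32] = { 0 };

	ws10[8] = 0x80;		/* WRITE_SAME(10): 128 blocks */
	ws16[12] = 0x10;	/* WRITE_SAME(16): 0x00001000 = 4096 blocks */
	ws32[31] = 0x08;	/* WRITE_SAME(32): 8 blocks */

	printf("WRITE_SAME:    %u blocks (be16 at byte 7)\n", get_be16(&ws10[7]));
	printf("WRITE_SAME_16: %u blocks (be32 at byte 10)\n", get_be32(&ws16[10]));
	printf("WRITE_SAME_32: %u blocks (be32 at byte 28)\n", get_be32(&ws32[28]));
	return 0;
}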
@@ -1219,8 +1255,14 @@ transport_emulate_control_cdb(struct se_task *task)
1219 1255
1220 if (ret < 0) 1256 if (ret < 0)
1221 return ret; 1257 return ret;
1222 task->task_scsi_status = GOOD; 1258 /*
1223 transport_complete_task(task, 1); 1259 * Handle the successful completion here unless a caller
1260 * has explicitly requested an asynchronous completion.
1261 */
1262 if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
1263 task->task_scsi_status = GOOD;
1264 transport_complete_task(task, 1);
1265 }
1224 1266
1225 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 1267 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
1226} 1268}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index b38b6c993e65..ca6e4a4df134 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
472 struct se_dev_entry *deve; 472 struct se_dev_entry *deve;
473 u32 i; 473 u32 i;
474 474
475 spin_lock_bh(&tpg->acl_node_lock); 475 spin_lock_irq(&tpg->acl_node_lock);
476 list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { 476 list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
477 spin_unlock_bh(&tpg->acl_node_lock); 477 spin_unlock_irq(&tpg->acl_node_lock);
478 478
479 spin_lock_irq(&nacl->device_list_lock); 479 spin_lock_irq(&nacl->device_list_lock);
480 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 480 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
491 } 491 }
492 spin_unlock_irq(&nacl->device_list_lock); 492 spin_unlock_irq(&nacl->device_list_lock);
493 493
494 spin_lock_bh(&tpg->acl_node_lock); 494 spin_lock_irq(&tpg->acl_node_lock);
495 } 495 }
496 spin_unlock_bh(&tpg->acl_node_lock); 496 spin_unlock_irq(&tpg->acl_node_lock);
497} 497}
498 498
499static struct se_port *core_alloc_port(struct se_device *dev) 499static struct se_port *core_alloc_port(struct se_device *dev)
@@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev)
839 return ret; 839 return ret;
840} 840}
841 841
842u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
843{
844 u32 tmp, aligned_max_sectors;
845 /*
846 * Limit max_sectors to a PAGE_SIZE aligned value for modern
847 * transport_allocate_data_tasks() operation.
848 */
849 tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
850 aligned_max_sectors = (tmp / block_size);
851 if (max_sectors != aligned_max_sectors) {
852 printk(KERN_INFO "Rounding down aligned max_sectors from %u"
853 " to %u\n", max_sectors, aligned_max_sectors);
854 return aligned_max_sectors;
855 }
856
857 return max_sectors;
858}
859
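se_dev_align_max_sectors() rounds the byte length implied by max_sectors down to a whole number of pages and converts back to sectors, so transport_allocate_data_tasks() never has to split a task mid-page. A quick user-space check of that arithmetic, assuming 4 KiB pages and a 512-byte block size:

#include <stdio.h>

#define PAGE_SIZE 4096u		/* assumed 4 KiB pages for this example */

/* mirrors se_dev_align_max_sectors(): rounddown(max_sectors * block_size, PAGE_SIZE) / block_size */
static unsigned int align_max_sectors(unsigned int max_sectors, unsigned int block_size)
{
	unsigned int bytes = max_sectors * block_size;

	return (bytes - (bytes % PAGE_SIZE)) / block_size;
}

int main(void)
{
	printf("1023 x 512 -> %u sectors\n", align_max_sectors(1023, 512));	/* 1016 */
	printf("1024 x 512 -> %u sectors\n", align_max_sectors(1024, 512));	/* 1024 */
	return 0;
}

With 512-byte blocks, 1023 sectors (523776 bytes) rounds down to 1016 sectors (520192 bytes, exactly 127 pages), while 1024 sectors is already page-aligned and is left untouched.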
842void se_dev_set_default_attribs( 860void se_dev_set_default_attribs(
843 struct se_device *dev, 861 struct se_device *dev,
844 struct se_dev_limits *dev_limits) 862 struct se_dev_limits *dev_limits)
@@ -878,6 +896,11 @@ void se_dev_set_default_attribs(
878 * max_sectors is based on subsystem plugin dependent requirements. 896 * max_sectors is based on subsystem plugin dependent requirements.
879 */ 897 */
880 dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; 898 dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
899 /*
900 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
901 */
902 limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
903 limits->logical_block_size);
881 dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; 904 dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
882 /* 905 /*
883 * Set optimal_sectors from max_sectors, which can be lowered via 906 * Set optimal_sectors from max_sectors, which can be lowered via
@@ -1242,6 +1265,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1242 return -EINVAL; 1265 return -EINVAL;
1243 } 1266 }
1244 } 1267 }
1268 /*
1269 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1270 */
1271 max_sectors = se_dev_align_max_sectors(max_sectors,
1272 dev->se_sub_dev->se_dev_attrib.block_size);
1245 1273
1246 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; 1274 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
1247 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", 1275 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
@@ -1344,15 +1372,17 @@ struct se_lun *core_dev_add_lun(
1344 */ 1372 */
1345 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { 1373 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
1346 struct se_node_acl *acl; 1374 struct se_node_acl *acl;
1347 spin_lock_bh(&tpg->acl_node_lock); 1375 spin_lock_irq(&tpg->acl_node_lock);
1348 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 1376 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1349 if (acl->dynamic_node_acl) { 1377 if (acl->dynamic_node_acl &&
1350 spin_unlock_bh(&tpg->acl_node_lock); 1378 (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
1379 !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
1380 spin_unlock_irq(&tpg->acl_node_lock);
1351 core_tpg_add_node_to_devs(acl, tpg); 1381 core_tpg_add_node_to_devs(acl, tpg);
1352 spin_lock_bh(&tpg->acl_node_lock); 1382 spin_lock_irq(&tpg->acl_node_lock);
1353 } 1383 }
1354 } 1384 }
1355 spin_unlock_bh(&tpg->acl_node_lock); 1385 spin_unlock_irq(&tpg->acl_node_lock);
1356 } 1386 }
1357 1387
1358 return lun_p; 1388 return lun_p;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index f1654694f4ea..55bbe0847a6d 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -481,7 +481,7 @@ static struct config_group *target_fabric_make_nodeacl(
481 481
482 se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); 482 se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
483 if (IS_ERR(se_nacl)) 483 if (IS_ERR(se_nacl))
484 return ERR_PTR(PTR_ERR(se_nacl)); 484 return ERR_CAST(se_nacl);
485 485
486 nacl_cg = &se_nacl->acl_group; 486 nacl_cg = &se_nacl->acl_group;
487 nacl_cg->default_groups = se_nacl->acl_default_groups; 487 nacl_cg->default_groups = se_nacl->acl_default_groups;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 1c1b849cd4fb..7fd3a161f7cc 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1598,14 +1598,14 @@ static int core_scsi3_decode_spec_i_port(
1598 * from the decoded fabric module specific TransportID 1598 * from the decoded fabric module specific TransportID
1599 * at *i_str. 1599 * at *i_str.
1600 */ 1600 */
1601 spin_lock_bh(&tmp_tpg->acl_node_lock); 1601 spin_lock_irq(&tmp_tpg->acl_node_lock);
1602 dest_node_acl = __core_tpg_get_initiator_node_acl( 1602 dest_node_acl = __core_tpg_get_initiator_node_acl(
1603 tmp_tpg, i_str); 1603 tmp_tpg, i_str);
1604 if (dest_node_acl) { 1604 if (dest_node_acl) {
1605 atomic_inc(&dest_node_acl->acl_pr_ref_count); 1605 atomic_inc(&dest_node_acl->acl_pr_ref_count);
1606 smp_mb__after_atomic_inc(); 1606 smp_mb__after_atomic_inc();
1607 } 1607 }
1608 spin_unlock_bh(&tmp_tpg->acl_node_lock); 1608 spin_unlock_irq(&tmp_tpg->acl_node_lock);
1609 1609
1610 if (!dest_node_acl) { 1610 if (!dest_node_acl) {
1611 core_scsi3_tpg_undepend_item(tmp_tpg); 1611 core_scsi3_tpg_undepend_item(tmp_tpg);
@@ -3496,14 +3496,14 @@ after_iport_check:
3496 /* 3496 /*
3497 * Locate the destination struct se_node_acl from the received Transport ID 3497 * Locate the destination struct se_node_acl from the received Transport ID
3498 */ 3498 */
3499 spin_lock_bh(&dest_se_tpg->acl_node_lock); 3499 spin_lock_irq(&dest_se_tpg->acl_node_lock);
3500 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, 3500 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
3501 initiator_str); 3501 initiator_str);
3502 if (dest_node_acl) { 3502 if (dest_node_acl) {
3503 atomic_inc(&dest_node_acl->acl_pr_ref_count); 3503 atomic_inc(&dest_node_acl->acl_pr_ref_count);
3504 smp_mb__after_atomic_inc(); 3504 smp_mb__after_atomic_inc();
3505 } 3505 }
3506 spin_unlock_bh(&dest_se_tpg->acl_node_lock); 3506 spin_unlock_irq(&dest_se_tpg->acl_node_lock);
3507 3507
3508 if (!dest_node_acl) { 3508 if (!dest_node_acl) {
3509 pr_err("Unable to locate %s dest_node_acl for" 3509 pr_err("Unable to locate %s dest_node_acl for"
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 3dd81d24d9a9..e567e129c697 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -390,12 +390,10 @@ static int rd_MEMCPY_read(struct rd_request *req)
390 length = req->rd_size; 390 length = req->rd_size;
391 391
392 dst = sg_virt(&sg_d[i++]) + dst_offset; 392 dst = sg_virt(&sg_d[i++]) + dst_offset;
393 if (!dst) 393 BUG_ON(!dst);
394 BUG();
395 394
396 src = sg_virt(&sg_s[j]) + src_offset; 395 src = sg_virt(&sg_s[j]) + src_offset;
397 if (!src) 396 BUG_ON(!src);
398 BUG();
399 397
400 dst_offset = 0; 398 dst_offset = 0;
401 src_offset = length; 399 src_offset = length;
@@ -415,8 +413,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
415 length = req->rd_size; 413 length = req->rd_size;
416 414
417 dst = sg_virt(&sg_d[i]) + dst_offset; 415 dst = sg_virt(&sg_d[i]) + dst_offset;
418 if (!dst) 416 BUG_ON(!dst);
419 BUG();
420 417
421 if (sg_d[i].length == length) { 418 if (sg_d[i].length == length) {
422 i++; 419 i++;
@@ -425,8 +422,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
425 dst_offset = length; 422 dst_offset = length;
426 423
427 src = sg_virt(&sg_s[j++]) + src_offset; 424 src = sg_virt(&sg_s[j++]) + src_offset;
428 if (!src) 425 BUG_ON(!src);
429 BUG();
430 426
431 src_offset = 0; 427 src_offset = 0;
432 page_end = 1; 428 page_end = 1;
@@ -510,12 +506,10 @@ static int rd_MEMCPY_write(struct rd_request *req)
510 length = req->rd_size; 506 length = req->rd_size;
511 507
512 src = sg_virt(&sg_s[i++]) + src_offset; 508 src = sg_virt(&sg_s[i++]) + src_offset;
513 if (!src) 509 BUG_ON(!src);
514 BUG();
515 510
516 dst = sg_virt(&sg_d[j]) + dst_offset; 511 dst = sg_virt(&sg_d[j]) + dst_offset;
517 if (!dst) 512 BUG_ON(!dst);
518 BUG();
519 513
520 src_offset = 0; 514 src_offset = 0;
521 dst_offset = length; 515 dst_offset = length;
@@ -535,8 +529,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
535 length = req->rd_size; 529 length = req->rd_size;
536 530
537 src = sg_virt(&sg_s[i]) + src_offset; 531 src = sg_virt(&sg_s[i]) + src_offset;
538 if (!src) 532 BUG_ON(!src);
539 BUG();
540 533
541 if (sg_s[i].length == length) { 534 if (sg_s[i].length == length) {
542 i++; 535 i++;
@@ -545,8 +538,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
545 src_offset = length; 538 src_offset = length;
546 539
547 dst = sg_virt(&sg_d[j++]) + dst_offset; 540 dst = sg_virt(&sg_d[j++]) + dst_offset;
548 if (!dst) 541 BUG_ON(!dst);
549 BUG();
550 542
551 dst_offset = 0; 543 dst_offset = 0;
552 page_end = 1; 544 page_end = 1;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 4f1ba4c5ef11..162b736c7342 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
137{ 137{
138 struct se_node_acl *acl; 138 struct se_node_acl *acl;
139 139
140 spin_lock_bh(&tpg->acl_node_lock); 140 spin_lock_irq(&tpg->acl_node_lock);
141 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 141 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
142 if (!strcmp(acl->initiatorname, initiatorname) && 142 if (!strcmp(acl->initiatorname, initiatorname) &&
143 !acl->dynamic_node_acl) { 143 !acl->dynamic_node_acl) {
144 spin_unlock_bh(&tpg->acl_node_lock); 144 spin_unlock_irq(&tpg->acl_node_lock);
145 return acl; 145 return acl;
146 } 146 }
147 } 147 }
148 spin_unlock_bh(&tpg->acl_node_lock); 148 spin_unlock_irq(&tpg->acl_node_lock);
149 149
150 return NULL; 150 return NULL;
151} 151}
@@ -298,13 +298,21 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
298 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); 298 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
299 return NULL; 299 return NULL;
300 } 300 }
301 /*
302 * Here we only create demo-mode MappedLUNs from the active
303 * TPG LUNs if the fabric is not explicitly asking for
304 * tpg_check_demo_mode_login_only() == 1.
305 */
306 if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
307 (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
308 do { ; } while (0);
309 else
310 core_tpg_add_node_to_devs(acl, tpg);
301 311
302 core_tpg_add_node_to_devs(acl, tpg); 312 spin_lock_irq(&tpg->acl_node_lock);
303
304 spin_lock_bh(&tpg->acl_node_lock);
305 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 313 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
306 tpg->num_node_acls++; 314 tpg->num_node_acls++;
307 spin_unlock_bh(&tpg->acl_node_lock); 315 spin_unlock_irq(&tpg->acl_node_lock);
308 316
309 pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" 317 pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
310 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 318 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -354,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
354{ 362{
355 struct se_node_acl *acl = NULL; 363 struct se_node_acl *acl = NULL;
356 364
357 spin_lock_bh(&tpg->acl_node_lock); 365 spin_lock_irq(&tpg->acl_node_lock);
358 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 366 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
359 if (acl) { 367 if (acl) {
360 if (acl->dynamic_node_acl) { 368 if (acl->dynamic_node_acl) {
@@ -362,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
362 pr_debug("%s_TPG[%u] - Replacing dynamic ACL" 370 pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
363 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 371 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
364 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); 372 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
365 spin_unlock_bh(&tpg->acl_node_lock); 373 spin_unlock_irq(&tpg->acl_node_lock);
366 /* 374 /*
367 * Release the locally allocated struct se_node_acl 375 * Release the locally allocated struct se_node_acl
368 * because * core_tpg_add_initiator_node_acl() returned 376 * because * core_tpg_add_initiator_node_acl() returned
@@ -378,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
378 " Node %s already exists for TPG %u, ignoring" 386 " Node %s already exists for TPG %u, ignoring"
379 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 387 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
380 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 388 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
381 spin_unlock_bh(&tpg->acl_node_lock); 389 spin_unlock_irq(&tpg->acl_node_lock);
382 return ERR_PTR(-EEXIST); 390 return ERR_PTR(-EEXIST);
383 } 391 }
384 spin_unlock_bh(&tpg->acl_node_lock); 392 spin_unlock_irq(&tpg->acl_node_lock);
385 393
386 if (!se_nacl) { 394 if (!se_nacl) {
387 pr_err("struct se_node_acl pointer is NULL\n"); 395 pr_err("struct se_node_acl pointer is NULL\n");
@@ -418,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
418 return ERR_PTR(-EINVAL); 426 return ERR_PTR(-EINVAL);
419 } 427 }
420 428
421 spin_lock_bh(&tpg->acl_node_lock); 429 spin_lock_irq(&tpg->acl_node_lock);
422 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 430 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
423 tpg->num_node_acls++; 431 tpg->num_node_acls++;
424 spin_unlock_bh(&tpg->acl_node_lock); 432 spin_unlock_irq(&tpg->acl_node_lock);
425 433
426done: 434done:
427 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" 435 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
@@ -445,14 +453,14 @@ int core_tpg_del_initiator_node_acl(
445 struct se_session *sess, *sess_tmp; 453 struct se_session *sess, *sess_tmp;
446 int dynamic_acl = 0; 454 int dynamic_acl = 0;
447 455
448 spin_lock_bh(&tpg->acl_node_lock); 456 spin_lock_irq(&tpg->acl_node_lock);
449 if (acl->dynamic_node_acl) { 457 if (acl->dynamic_node_acl) {
450 acl->dynamic_node_acl = 0; 458 acl->dynamic_node_acl = 0;
451 dynamic_acl = 1; 459 dynamic_acl = 1;
452 } 460 }
453 list_del(&acl->acl_list); 461 list_del(&acl->acl_list);
454 tpg->num_node_acls--; 462 tpg->num_node_acls--;
455 spin_unlock_bh(&tpg->acl_node_lock); 463 spin_unlock_irq(&tpg->acl_node_lock);
456 464
457 spin_lock_bh(&tpg->session_lock); 465 spin_lock_bh(&tpg->session_lock);
458 list_for_each_entry_safe(sess, sess_tmp, 466 list_for_each_entry_safe(sess, sess_tmp,
@@ -503,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth(
503 struct se_node_acl *acl; 511 struct se_node_acl *acl;
504 int dynamic_acl = 0; 512 int dynamic_acl = 0;
505 513
506 spin_lock_bh(&tpg->acl_node_lock); 514 spin_lock_irq(&tpg->acl_node_lock);
507 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 515 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
508 if (!acl) { 516 if (!acl) {
509 pr_err("Access Control List entry for %s Initiator" 517 pr_err("Access Control List entry for %s Initiator"
510 " Node %s does not exists for TPG %hu, ignoring" 518 " Node %s does not exists for TPG %hu, ignoring"
511 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 519 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
512 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 520 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
513 spin_unlock_bh(&tpg->acl_node_lock); 521 spin_unlock_irq(&tpg->acl_node_lock);
514 return -ENODEV; 522 return -ENODEV;
515 } 523 }
516 if (acl->dynamic_node_acl) { 524 if (acl->dynamic_node_acl) {
517 acl->dynamic_node_acl = 0; 525 acl->dynamic_node_acl = 0;
518 dynamic_acl = 1; 526 dynamic_acl = 1;
519 } 527 }
520 spin_unlock_bh(&tpg->acl_node_lock); 528 spin_unlock_irq(&tpg->acl_node_lock);
521 529
522 spin_lock_bh(&tpg->session_lock); 530 spin_lock_bh(&tpg->session_lock);
523 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { 531 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
@@ -533,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth(
533 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 541 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
534 spin_unlock_bh(&tpg->session_lock); 542 spin_unlock_bh(&tpg->session_lock);
535 543
536 spin_lock_bh(&tpg->acl_node_lock); 544 spin_lock_irq(&tpg->acl_node_lock);
537 if (dynamic_acl) 545 if (dynamic_acl)
538 acl->dynamic_node_acl = 1; 546 acl->dynamic_node_acl = 1;
539 spin_unlock_bh(&tpg->acl_node_lock); 547 spin_unlock_irq(&tpg->acl_node_lock);
540 return -EEXIST; 548 return -EEXIST;
541 } 549 }
542 /* 550 /*
@@ -571,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth(
571 if (init_sess) 579 if (init_sess)
572 tpg->se_tpg_tfo->close_session(init_sess); 580 tpg->se_tpg_tfo->close_session(init_sess);
573 581
574 spin_lock_bh(&tpg->acl_node_lock); 582 spin_lock_irq(&tpg->acl_node_lock);
575 if (dynamic_acl) 583 if (dynamic_acl)
576 acl->dynamic_node_acl = 1; 584 acl->dynamic_node_acl = 1;
577 spin_unlock_bh(&tpg->acl_node_lock); 585 spin_unlock_irq(&tpg->acl_node_lock);
578 return -EINVAL; 586 return -EINVAL;
579 } 587 }
580 spin_unlock_bh(&tpg->session_lock); 588 spin_unlock_bh(&tpg->session_lock);
@@ -590,10 +598,10 @@ int core_tpg_set_initiator_node_queue_depth(
590 initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 598 initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
591 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 599 tpg->se_tpg_tfo->tpg_get_tag(tpg));
592 600
593 spin_lock_bh(&tpg->acl_node_lock); 601 spin_lock_irq(&tpg->acl_node_lock);
594 if (dynamic_acl) 602 if (dynamic_acl)
595 acl->dynamic_node_acl = 1; 603 acl->dynamic_node_acl = 1;
596 spin_unlock_bh(&tpg->acl_node_lock); 604 spin_unlock_irq(&tpg->acl_node_lock);
597 605
598 return 0; 606 return 0;
599} 607}
@@ -717,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
717 * not been released because of TFO->tpg_check_demo_mode_cache() == 1 725 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
718 * in transport_deregister_session(). 726 * in transport_deregister_session().
719 */ 727 */
720 spin_lock_bh(&se_tpg->acl_node_lock); 728 spin_lock_irq(&se_tpg->acl_node_lock);
721 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, 729 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
722 acl_list) { 730 acl_list) {
723 list_del(&nacl->acl_list); 731 list_del(&nacl->acl_list);
724 se_tpg->num_node_acls--; 732 se_tpg->num_node_acls--;
725 spin_unlock_bh(&se_tpg->acl_node_lock); 733 spin_unlock_irq(&se_tpg->acl_node_lock);
726 734
727 core_tpg_wait_for_nacl_pr_ref(nacl); 735 core_tpg_wait_for_nacl_pr_ref(nacl);
728 core_free_device_list_for_node(nacl, se_tpg); 736 core_free_device_list_for_node(nacl, se_tpg);
729 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); 737 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
730 738
731 spin_lock_bh(&se_tpg->acl_node_lock); 739 spin_lock_irq(&se_tpg->acl_node_lock);
732 } 740 }
733 spin_unlock_bh(&se_tpg->acl_node_lock); 741 spin_unlock_irq(&se_tpg->acl_node_lock);
734 742
735 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) 743 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
736 core_tpg_release_virtual_lun0(se_tpg); 744 core_tpg_release_virtual_lun0(se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 89760329d5d0..a4b0a8d27f25 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -389,17 +389,18 @@ void transport_deregister_session(struct se_session *se_sess)
389{ 389{
390 struct se_portal_group *se_tpg = se_sess->se_tpg; 390 struct se_portal_group *se_tpg = se_sess->se_tpg;
391 struct se_node_acl *se_nacl; 391 struct se_node_acl *se_nacl;
392 unsigned long flags;
392 393
393 if (!se_tpg) { 394 if (!se_tpg) {
394 transport_free_session(se_sess); 395 transport_free_session(se_sess);
395 return; 396 return;
396 } 397 }
397 398
398 spin_lock_bh(&se_tpg->session_lock); 399 spin_lock_irqsave(&se_tpg->session_lock, flags);
399 list_del(&se_sess->sess_list); 400 list_del(&se_sess->sess_list);
400 se_sess->se_tpg = NULL; 401 se_sess->se_tpg = NULL;
401 se_sess->fabric_sess_ptr = NULL; 402 se_sess->fabric_sess_ptr = NULL;
402 spin_unlock_bh(&se_tpg->session_lock); 403 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
403 404
404 /* 405 /*
405 * Determine if we need to do extra work for this initiator node's 406 * Determine if we need to do extra work for this initiator node's
@@ -407,22 +408,22 @@ void transport_deregister_session(struct se_session *se_sess)
407 */ 408 */
408 se_nacl = se_sess->se_node_acl; 409 se_nacl = se_sess->se_node_acl;
409 if (se_nacl) { 410 if (se_nacl) {
410 spin_lock_bh(&se_tpg->acl_node_lock); 411 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
411 if (se_nacl->dynamic_node_acl) { 412 if (se_nacl->dynamic_node_acl) {
412 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( 413 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
413 se_tpg)) { 414 se_tpg)) {
414 list_del(&se_nacl->acl_list); 415 list_del(&se_nacl->acl_list);
415 se_tpg->num_node_acls--; 416 se_tpg->num_node_acls--;
416 spin_unlock_bh(&se_tpg->acl_node_lock); 417 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
417 418
418 core_tpg_wait_for_nacl_pr_ref(se_nacl); 419 core_tpg_wait_for_nacl_pr_ref(se_nacl);
419 core_free_device_list_for_node(se_nacl, se_tpg); 420 core_free_device_list_for_node(se_nacl, se_tpg);
420 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, 421 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
421 se_nacl); 422 se_nacl);
422 spin_lock_bh(&se_tpg->acl_node_lock); 423 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
423 } 424 }
424 } 425 }
425 spin_unlock_bh(&se_tpg->acl_node_lock); 426 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
426 } 427 }
427 428
428 transport_free_session(se_sess); 429 transport_free_session(se_sess);
@@ -976,15 +977,17 @@ static void target_qf_do_work(struct work_struct *work)
976{ 977{
977 struct se_device *dev = container_of(work, struct se_device, 978 struct se_device *dev = container_of(work, struct se_device,
978 qf_work_queue); 979 qf_work_queue);
980 LIST_HEAD(qf_cmd_list);
979 struct se_cmd *cmd, *cmd_tmp; 981 struct se_cmd *cmd, *cmd_tmp;
980 982
981 spin_lock_irq(&dev->qf_cmd_lock); 983 spin_lock_irq(&dev->qf_cmd_lock);
982 list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { 984 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
985 spin_unlock_irq(&dev->qf_cmd_lock);
983 986
987 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
984 list_del(&cmd->se_qf_node); 988 list_del(&cmd->se_qf_node);
985 atomic_dec(&dev->dev_qf_count); 989 atomic_dec(&dev->dev_qf_count);
986 smp_mb__after_atomic_dec(); 990 smp_mb__after_atomic_dec();
987 spin_unlock_irq(&dev->qf_cmd_lock);
988 991
989 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 992 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
990 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 993 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -996,10 +999,7 @@ static void target_qf_do_work(struct work_struct *work)
996 * has been added to head of queue 999 * has been added to head of queue
997 */ 1000 */
998 transport_add_cmd_to_queue(cmd, cmd->t_state); 1001 transport_add_cmd_to_queue(cmd, cmd->t_state);
999
1000 spin_lock_irq(&dev->qf_cmd_lock);
1001 } 1002 }
1002 spin_unlock_irq(&dev->qf_cmd_lock);
1003} 1003}
1004 1004
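target_qf_do_work() now splices the whole QUEUE_FULL list onto a local list head under dev->qf_cmd_lock and walks it with the lock dropped, instead of re-taking the lock around every entry. A minimal pthread sketch of the same splice-then-process idea; the kernel uses list_splice_init() on a struct list_head, while the singly linked list and names below are invented for illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;		/* plays the role of dev->qf_cmd_list */

static void queue_node(int id)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return;
	n->id = id;
	pthread_mutex_lock(&lock);
	n->next = pending;
	pending = n;
	pthread_mutex_unlock(&lock);
}

static void drain(void)
{
	struct node *local, *n;

	pthread_mutex_lock(&lock);
	local = pending;		/* splice: steal the whole pending list ... */
	pending = NULL;			/* ... and leave an empty one behind */
	pthread_mutex_unlock(&lock);

	while ((n = local) != NULL) {	/* process entries with the lock dropped */
		local = n->next;
		printf("processing %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	queue_node(1);
	queue_node(2);
	drain();
	return 0;
}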
1005unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 1005unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
@@ -2053,8 +2053,14 @@ static void transport_generic_request_failure(
2053 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 2053 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2054 break; 2054 break;
2055 } 2055 }
2056 2056 /*
2057 if (!sc) 2057 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
2058 * make the call to transport_send_check_condition_and_sense()
2059 * directly. Otherwise expect the fabric to make the call to
2060 * transport_send_check_condition_and_sense() after handling
2061 * possible unsolicited write data payloads.
2062 */
2063 if (!sc && !cmd->se_tfo->new_cmd_map)
2058 transport_new_cmd_failure(cmd); 2064 transport_new_cmd_failure(cmd);
2059 else { 2065 else {
2060 ret = transport_send_check_condition_and_sense(cmd, 2066 ret = transport_send_check_condition_and_sense(cmd,
@@ -2847,12 +2853,42 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2847 " transport_dev_end_lba(): %llu\n", 2853 " transport_dev_end_lba(): %llu\n",
2848 cmd->t_task_lba, sectors, 2854 cmd->t_task_lba, sectors,
2849 transport_dev_end_lba(dev)); 2855 transport_dev_end_lba(dev));
2850 pr_err(" We should return CHECK_CONDITION" 2856 return -EINVAL;
2851 " but we don't yet\n");
2852 return 0;
2853 } 2857 }
2854 2858
2855 return sectors; 2859 return 0;
2860}
2861
2862static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2863{
2864 /*
2865 * Determine if the received WRITE_SAME is used for direct
2866 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
2867 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
2868 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
2869 */
2870 int passthrough = (dev->transport->transport_type ==
2871 TRANSPORT_PLUGIN_PHBA_PDEV);
2872
2873 if (!passthrough) {
2874 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2875 pr_err("WRITE_SAME PBDATA and LBDATA"
2876 " bits not supported for Block Discard"
2877 " Emulation\n");
2878 return -ENOSYS;
2879 }
2880 /*
2881 * Currently for the emulated case we only accept
2882 * tpws with the UNMAP=1 bit set.
2883 */
2884 if (!(flags[0] & 0x08)) {
2885 pr_err("WRITE_SAME w/o UNMAP bit not"
2886 " supported for Block Discard Emulation\n");
2887 return -ENOSYS;
2888 }
2889 }
2890
2891 return 0;
2856} 2892}
2857 2893
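target_check_write_same_discard() centralises the flag checks that the WRITE_SAME paths previously duplicated: for the emulated (non-pSCSI-passthrough) case, PBDATA (0x04) and LBDATA (0x02) must be clear and UNMAP (0x08) must be set before the request is mapped onto a block-layer discard. A small sketch of the same bit tests on an illustrative flags byte:

#include <stdio.h>

/* mirrors the emulated-case tests in target_check_write_same_discard() */
static int check_write_same_flags(unsigned char flags)
{
	if ((flags & 0x04) || (flags & 0x02))	/* PBDATA / LBDATA not supported */
		return -1;
	if (!(flags & 0x08))			/* UNMAP must be set for discard emulation */
		return -1;
	return 0;
}

int main(void)
{
	printf("UNMAP only     : %d\n", check_write_same_flags(0x08));	/*  0 */
	printf("no UNMAP       : %d\n", check_write_same_flags(0x00));	/* -1 */
	printf("UNMAP + LBDATA : %d\n", check_write_same_flags(0x0a));	/* -1 */
	return 0;
}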
2858/* transport_generic_cmd_sequencer(): 2894/* transport_generic_cmd_sequencer():
@@ -3065,7 +3101,7 @@ static int transport_generic_cmd_sequencer(
3065 goto out_unsupported_cdb; 3101 goto out_unsupported_cdb;
3066 3102
3067 if (sectors) 3103 if (sectors)
3068 size = transport_get_size(sectors, cdb, cmd); 3104 size = transport_get_size(1, cdb, cmd);
3069 else { 3105 else {
3070 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 3106 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
3071 " supported\n"); 3107 " supported\n");
@@ -3075,27 +3111,9 @@ static int transport_generic_cmd_sequencer(
3075 cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 3111 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
3076 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3112 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3077 3113
3078 /* 3114 if (target_check_write_same_discard(&cdb[10], dev) < 0)
3079 * Skip the remaining assignments for TCM/PSCSI passthrough
3080 */
3081 if (passthrough)
3082 break;
3083
3084 if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
3085 pr_err("WRITE_SAME PBDATA and LBDATA"
3086 " bits not supported for Block Discard"
3087 " Emulation\n");
3088 goto out_invalid_cdb_field; 3115 goto out_invalid_cdb_field;
3089 } 3116
3090 /*
3091 * Currently for the emulated case we only accept
3092 * tpws with the UNMAP=1 bit set.
3093 */
3094 if (!(cdb[10] & 0x08)) {
3095 pr_err("WRITE_SAME w/o UNMAP bit not"
3096 " supported for Block Discard Emulation\n");
3097 goto out_invalid_cdb_field;
3098 }
3099 break; 3117 break;
3100 default: 3118 default:
3101 pr_err("VARIABLE_LENGTH_CMD service action" 3119 pr_err("VARIABLE_LENGTH_CMD service action"
@@ -3330,10 +3348,12 @@ static int transport_generic_cmd_sequencer(
3330 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; 3348 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
3331 /* 3349 /*
3332 * Check to ensure that LBA + Range does not exceed past end of 3350 * Check to ensure that LBA + Range does not exceed past end of
3333 * device. 3351 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
3334 */ 3352 */
3335 if (!transport_cmd_get_valid_sectors(cmd)) 3353 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
3336 goto out_invalid_cdb_field; 3354 if (transport_cmd_get_valid_sectors(cmd) < 0)
3355 goto out_invalid_cdb_field;
3356 }
3337 break; 3357 break;
3338 case UNMAP: 3358 case UNMAP:
3339 size = get_unaligned_be16(&cdb[7]); 3359 size = get_unaligned_be16(&cdb[7]);
@@ -3345,40 +3365,38 @@ static int transport_generic_cmd_sequencer(
3345 goto out_unsupported_cdb; 3365 goto out_unsupported_cdb;
3346 3366
3347 if (sectors) 3367 if (sectors)
3348 size = transport_get_size(sectors, cdb, cmd); 3368 size = transport_get_size(1, cdb, cmd);
3349 else { 3369 else {
3350 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 3370 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3351 goto out_invalid_cdb_field; 3371 goto out_invalid_cdb_field;
3352 } 3372 }
3353 3373
3354 cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 3374 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
3355 passthrough = (dev->transport->transport_type == 3375 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3356 TRANSPORT_PLUGIN_PHBA_PDEV); 3376
3357 /* 3377 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3358 * Determine if the received WRITE_SAME_16 is used to for direct 3378 goto out_invalid_cdb_field;
3359 * passthrough into Linux/SCSI with struct request via TCM/pSCSI 3379 break;
3360 * or we are signaling the use of internal WRITE_SAME + UNMAP=1 3380 case WRITE_SAME:
3361 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and 3381 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3362 * TCM/FILEIO subsystem plugin backstores. 3382 if (sector_ret)
3363 */ 3383 goto out_unsupported_cdb;
3364 if (!passthrough) { 3384
3365 if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { 3385 if (sectors)
3366 pr_err("WRITE_SAME PBDATA and LBDATA" 3386 size = transport_get_size(1, cdb, cmd);
3367 " bits not supported for Block Discard" 3387 else {
3368 " Emulation\n"); 3388 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3369 goto out_invalid_cdb_field; 3389 goto out_invalid_cdb_field;
3370 }
3371 /*
3372 * Currently for the emulated case we only accept
3373 * tpws with the UNMAP=1 bit set.
3374 */
3375 if (!(cdb[1] & 0x08)) {
3376 pr_err("WRITE_SAME w/o UNMAP bit not "
3377 " supported for Block Discard Emulation\n");
3378 goto out_invalid_cdb_field;
3379 }
3380 } 3390 }
3391
3392 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
3381 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3393 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3394 /*
3395 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
3396 * of byte 1 bit 3 UNMAP instead of original reserved field
3397 */
3398 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3399 goto out_invalid_cdb_field;
3382 break; 3400 break;
3383 case ALLOW_MEDIUM_REMOVAL: 3401 case ALLOW_MEDIUM_REMOVAL:
3384 case GPCMD_CLOSE_TRACK: 3402 case GPCMD_CLOSE_TRACK:
@@ -3873,9 +3891,7 @@ EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
3873static int transport_new_cmd_obj(struct se_cmd *cmd) 3891static int transport_new_cmd_obj(struct se_cmd *cmd)
3874{ 3892{
3875 struct se_device *dev = cmd->se_dev; 3893 struct se_device *dev = cmd->se_dev;
3876 u32 task_cdbs; 3894 int set_counts = 1, rc, task_cdbs;
3877 u32 rc;
3878 int set_counts = 1;
3879 3895
3880 /* 3896 /*
3881 * Setup any BIDI READ tasks and memory from 3897 * Setup any BIDI READ tasks and memory from
@@ -3893,7 +3909,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
3893 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3909 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3894 cmd->scsi_sense_reason = 3910 cmd->scsi_sense_reason =
3895 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3911 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3896 return PYX_TRANSPORT_LU_COMM_FAILURE; 3912 return -EINVAL;
3897 } 3913 }
3898 atomic_inc(&cmd->t_fe_count); 3914 atomic_inc(&cmd->t_fe_count);
3899 atomic_inc(&cmd->t_se_count); 3915 atomic_inc(&cmd->t_se_count);
@@ -3912,7 +3928,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
3912 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3928 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3913 cmd->scsi_sense_reason = 3929 cmd->scsi_sense_reason =
3914 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3930 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3915 return PYX_TRANSPORT_LU_COMM_FAILURE; 3931 return -EINVAL;
3916 } 3932 }
3917 3933
3918 if (set_counts) { 3934 if (set_counts) {
@@ -4028,8 +4044,6 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4028 if (!task->task_sg) 4044 if (!task->task_sg)
4029 continue; 4045 continue;
4030 4046
4031 BUG_ON(!task->task_padded_sg);
4032
4033 if (!sg_first) { 4047 if (!sg_first) {
4034 sg_first = task->task_sg; 4048 sg_first = task->task_sg;
4035 chained_nents = task->task_sg_nents; 4049 chained_nents = task->task_sg_nents;
@@ -4037,9 +4051,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4037 sg_chain(sg_prev, sg_prev_nents, task->task_sg); 4051 sg_chain(sg_prev, sg_prev_nents, task->task_sg);
4038 chained_nents += task->task_sg_nents; 4052 chained_nents += task->task_sg_nents;
4039 } 4053 }
4054 /*
4055 * For the padded tasks, use the extra SGL vector allocated
4056 * in transport_allocate_data_tasks() for the sg_prev_nents
4057 * offset into sg_chain() above. The last task of a
4058 * multi-task list, or a single task will not have
4059 * task->task_padded_sg set.
4060 */
4061 if (task->task_padded_sg)
4062 sg_prev_nents = (task->task_sg_nents + 1);
4063 else
4064 sg_prev_nents = task->task_sg_nents;
4040 4065
4041 sg_prev = task->task_sg; 4066 sg_prev = task->task_sg;
4042 sg_prev_nents = task->task_sg_nents;
4043 } 4067 }
4044 /* 4068 /*
4045 * Setup the starting pointer and total t_tasks_sg_linked_no including 4069 * Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4091,7 +4115,7 @@ static int transport_allocate_data_tasks(
4091 4115
4092 cmd_sg = sgl; 4116 cmd_sg = sgl;
4093 for (i = 0; i < task_count; i++) { 4117 for (i = 0; i < task_count; i++) {
4094 unsigned int task_size; 4118 unsigned int task_size, task_sg_nents_padded;
4095 int count; 4119 int count;
4096 4120
4097 task = transport_generic_get_task(cmd, data_direction); 4121 task = transport_generic_get_task(cmd, data_direction);
@@ -4110,30 +4134,33 @@ static int transport_allocate_data_tasks(
4110 4134
4111 /* Update new cdb with updated lba/sectors */ 4135 /* Update new cdb with updated lba/sectors */
4112 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); 4136 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
4113 4137 /*
4138 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
4139 * in order to calculate the number per task SGL entries
4140 */
4141 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
4114 /* 4142 /*
4115 * Check if the fabric module driver is requesting that all 4143 * Check if the fabric module driver is requesting that all
4116 * struct se_task->task_sg[] be chained together.. If so, 4144 * struct se_task->task_sg[] be chained together.. If so,
4117 * then allocate an extra padding SG entry for linking and 4145 * then allocate an extra padding SG entry for linking and
4118 * marking the end of the chained SGL. 4146 * marking the end of the chained SGL for every task except
4119 * Possibly over-allocate task sgl size by using cmd sgl size. 4147 * the last one for (task_count > 1) operation, or skipping
4120 * It's so much easier and only a waste when task_count > 1. 4148 * the extra padding for the (task_count == 1) case.
4121 * That is extremely rare.
4122 */ 4149 */
4123 task->task_sg_nents = sgl_nents; 4150 if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
4124 if (cmd->se_tfo->task_sg_chaining) { 4151 task_sg_nents_padded = (task->task_sg_nents + 1);
4125 task->task_sg_nents++;
4126 task->task_padded_sg = 1; 4152 task->task_padded_sg = 1;
4127 } 4153 } else
4154 task_sg_nents_padded = task->task_sg_nents;
4128 4155
4129 task->task_sg = kmalloc(sizeof(struct scatterlist) * 4156 task->task_sg = kmalloc(sizeof(struct scatterlist) *
4130 task->task_sg_nents, GFP_KERNEL); 4157 task_sg_nents_padded, GFP_KERNEL);
4131 if (!task->task_sg) { 4158 if (!task->task_sg) {
4132 cmd->se_dev->transport->free_task(task); 4159 cmd->se_dev->transport->free_task(task);
4133 return -ENOMEM; 4160 return -ENOMEM;
4134 } 4161 }
4135 4162
4136 sg_init_table(task->task_sg, task->task_sg_nents); 4163 sg_init_table(task->task_sg, task_sg_nents_padded);
4137 4164
4138 task_size = task->task_size; 4165 task_size = task->task_size;
4139 4166
@@ -4230,10 +4257,13 @@ static u32 transport_allocate_tasks(
4230 struct scatterlist *sgl, 4257 struct scatterlist *sgl,
4231 unsigned int sgl_nents) 4258 unsigned int sgl_nents)
4232{ 4259{
4233 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) 4260 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
4261 if (transport_cmd_get_valid_sectors(cmd) < 0)
4262 return -EINVAL;
4263
4234 return transport_allocate_data_tasks(cmd, lba, data_direction, 4264 return transport_allocate_data_tasks(cmd, lba, data_direction,
4235 sgl, sgl_nents); 4265 sgl, sgl_nents);
4236 else 4266 } else
4237 return transport_allocate_control_task(cmd); 4267 return transport_allocate_control_task(cmd);
4238 4268
4239} 4269}
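In the reworked transport_allocate_data_tasks() above, each task's SGL entry count is now derived from its byte length with DIV_ROUND_UP(task_size, PAGE_SIZE), and one extra entry is allocated for sg_chain() on every task except the last when the fabric requests task_sg_chaining. A user-space sketch of that bookkeeping, assuming 4 KiB pages and invented sizes:

#include <stdio.h>

#define PAGE_SIZE 4096u				/* assumed 4 KiB pages */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int task_size = 9000;		/* per-task payload in bytes (illustrative) */
	unsigned int task_count = 3;		/* tasks for this command (illustrative) */
	int chaining = 1;			/* fabric set se_tfo->task_sg_chaining */
	unsigned int i;

	for (i = 0; i < task_count; i++) {
		unsigned int nents = DIV_ROUND_UP(task_size, PAGE_SIZE);
		/* every task but the last gets one padding entry for sg_chain() */
		unsigned int padded = (chaining && i < task_count - 1) ? nents + 1 : nents;

		printf("task %u: %u data entries, %u allocated\n", i, nents, padded);
	}
	return 0;
}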
@@ -4726,6 +4756,13 @@ int transport_send_check_condition_and_sense(
4726 */ 4756 */
4727 switch (reason) { 4757 switch (reason) {
4728 case TCM_NON_EXISTENT_LUN: 4758 case TCM_NON_EXISTENT_LUN:
4759 /* CURRENT ERROR */
4760 buffer[offset] = 0x70;
4761 /* ILLEGAL REQUEST */
4762 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4763 /* LOGICAL UNIT NOT SUPPORTED */
4764 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
4765 break;
4729 case TCM_UNSUPPORTED_SCSI_OPCODE: 4766 case TCM_UNSUPPORTED_SCSI_OPCODE:
4730 case TCM_SECTOR_COUNT_TOO_MANY: 4767 case TCM_SECTOR_COUNT_TOO_MANY:
4731 /* CURRENT ERROR */ 4768 /* CURRENT ERROR */
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index bd4fe21a23b8..3749d8b4b423 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -98,8 +98,7 @@ struct ft_tpg {
98 struct list_head list; /* linkage in ft_lport_acl tpg_list */ 98 struct list_head list; /* linkage in ft_lport_acl tpg_list */
99 struct list_head lun_list; /* head of LUNs */ 99 struct list_head lun_list; /* head of LUNs */
100 struct se_portal_group se_tpg; 100 struct se_portal_group se_tpg;
101 struct task_struct *thread; /* processing thread */ 101 struct workqueue_struct *workqueue;
102 struct se_queue_obj qobj; /* queue for processing thread */
103}; 102};
104 103
105struct ft_lport_acl { 104struct ft_lport_acl {
@@ -110,16 +109,10 @@ struct ft_lport_acl {
110 struct se_wwn fc_lport_wwn; 109 struct se_wwn fc_lport_wwn;
111}; 110};
112 111
113enum ft_cmd_state {
114 FC_CMD_ST_NEW = 0,
115 FC_CMD_ST_REJ
116};
117
118/* 112/*
119 * Commands 113 * Commands
120 */ 114 */
121struct ft_cmd { 115struct ft_cmd {
122 enum ft_cmd_state state;
123 u32 lun; /* LUN from request */ 116 u32 lun; /* LUN from request */
124 struct ft_sess *sess; /* session held for cmd */ 117 struct ft_sess *sess; /* session held for cmd */
125 struct fc_seq *seq; /* sequence in exchange mgr */ 118 struct fc_seq *seq; /* sequence in exchange mgr */
@@ -127,7 +120,7 @@ struct ft_cmd {
127 struct fc_frame *req_frame; 120 struct fc_frame *req_frame;
128 unsigned char *cdb; /* pointer to CDB inside frame */ 121 unsigned char *cdb; /* pointer to CDB inside frame */
129 u32 write_data_len; /* data received on writes */ 122 u32 write_data_len; /* data received on writes */
130 struct se_queue_req se_req; 123 struct work_struct work;
131 /* Local sense buffer */ 124 /* Local sense buffer */
132 unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER]; 125 unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
133 u32 was_ddp_setup:1; /* Set only if ddp is setup */ 126 u32 was_ddp_setup:1; /* Set only if ddp is setup */
@@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *);
177/* 170/*
178 * other internal functions. 171 * other internal functions.
179 */ 172 */
180int ft_thread(void *);
181void ft_recv_req(struct ft_sess *, struct fc_frame *); 173void ft_recv_req(struct ft_sess *, struct fc_frame *);
182struct ft_tpg *ft_lport_find_tpg(struct fc_lport *); 174struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
183struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *); 175struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 5654dc22f7ae..80fbcde00cb6 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -62,8 +62,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
62 int count; 62 int count;
63 63
64 se_cmd = &cmd->se_cmd; 64 se_cmd = &cmd->se_cmd;
65 pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", 65 pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
66 caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); 66 caller, cmd, cmd->sess, cmd->seq, se_cmd);
67 pr_debug("%s: cmd %p cdb %p\n", 67 pr_debug("%s: cmd %p cdb %p\n",
68 caller, cmd, cmd->cdb); 68 caller, cmd, cmd->cdb);
69 pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); 69 pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
@@ -90,38 +90,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
90 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); 90 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
91} 91}
92 92
93static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
94{
95 struct ft_tpg *tpg = sess->tport->tpg;
96 struct se_queue_obj *qobj = &tpg->qobj;
97 unsigned long flags;
98
99 qobj = &sess->tport->tpg->qobj;
100 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
101 list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
102 atomic_inc(&qobj->queue_cnt);
103 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
104
105 wake_up_process(tpg->thread);
106}
107
108static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
109{
110 unsigned long flags;
111 struct se_queue_req *qr;
112
113 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
114 if (list_empty(&qobj->qobj_list)) {
115 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
116 return NULL;
117 }
118 qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
119 list_del(&qr->qr_list);
120 atomic_dec(&qobj->queue_cnt);
121 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
122 return container_of(qr, struct ft_cmd, se_req);
123}
124
125static void ft_free_cmd(struct ft_cmd *cmd) 93static void ft_free_cmd(struct ft_cmd *cmd)
126{ 94{
127 struct fc_frame *fp; 95 struct fc_frame *fp;
@@ -282,9 +250,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
282 250
283int ft_get_cmd_state(struct se_cmd *se_cmd) 251int ft_get_cmd_state(struct se_cmd *se_cmd)
284{ 252{
285 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); 253 return 0;
286
287 return cmd->state;
288} 254}
289 255
290int ft_is_state_remove(struct se_cmd *se_cmd) 256int ft_is_state_remove(struct se_cmd *se_cmd)
@@ -505,6 +471,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
505 return 0; 471 return 0;
506} 472}
507 473
474static void ft_send_work(struct work_struct *work);
475
508/* 476/*
509 * Handle incoming FCP command. 477 * Handle incoming FCP command.
510 */ 478 */
@@ -523,7 +491,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
523 goto busy; 491 goto busy;
524 } 492 }
525 cmd->req_frame = fp; /* hold frame during cmd */ 493 cmd->req_frame = fp; /* hold frame during cmd */
526 ft_queue_cmd(sess, cmd); 494
495 INIT_WORK(&cmd->work, ft_send_work);
496 queue_work(sess->tport->tpg->workqueue, &cmd->work);
527 return; 497 return;
528 498
529busy: 499busy:
@@ -563,12 +533,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
563/* 533/*
564 * Send new command to target. 534 * Send new command to target.
565 */ 535 */
566static void ft_send_cmd(struct ft_cmd *cmd) 536static void ft_send_work(struct work_struct *work)
567{ 537{
538 struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
568 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); 539 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
569 struct se_cmd *se_cmd; 540 struct se_cmd *se_cmd;
570 struct fcp_cmnd *fcp; 541 struct fcp_cmnd *fcp;
571 int data_dir; 542 int data_dir = 0;
572 u32 data_len; 543 u32 data_len;
573 int task_attr; 544 int task_attr;
574 int ret; 545 int ret;
@@ -675,42 +646,3 @@ static void ft_send_cmd(struct ft_cmd *cmd)
675err: 646err:
676 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); 647 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
677} 648}
678
679/*
680 * Handle request in the command thread.
681 */
682static void ft_exec_req(struct ft_cmd *cmd)
683{
684 pr_debug("cmd state %x\n", cmd->state);
685 switch (cmd->state) {
686 case FC_CMD_ST_NEW:
687 ft_send_cmd(cmd);
688 break;
689 default:
690 break;
691 }
692}
693
694/*
695 * Processing thread.
696 * Currently one thread per tpg.
697 */
698int ft_thread(void *arg)
699{
700 struct ft_tpg *tpg = arg;
701 struct se_queue_obj *qobj = &tpg->qobj;
702 struct ft_cmd *cmd;
703
704 while (!kthread_should_stop()) {
705 schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
706 if (kthread_should_stop())
707 goto out;
708
709 cmd = ft_dequeue_cmd(qobj);
710 if (cmd)
711 ft_exec_req(cmd);
712 }
713
714out:
715 return 0;
716}
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 8781d1e423df..8fa39b74f22c 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -256,7 +256,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
256 struct se_portal_group *se_tpg = &tpg->se_tpg; 256 struct se_portal_group *se_tpg = &tpg->se_tpg;
257 struct se_node_acl *se_acl; 257 struct se_node_acl *se_acl;
258 258
259 spin_lock_bh(&se_tpg->acl_node_lock); 259 spin_lock_irq(&se_tpg->acl_node_lock);
260 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { 260 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
261 acl = container_of(se_acl, struct ft_node_acl, se_node_acl); 261 acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
262 pr_debug("acl %p port_name %llx\n", 262 pr_debug("acl %p port_name %llx\n",
@@ -270,7 +270,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
270 break; 270 break;
271 } 271 }
272 } 272 }
273 spin_unlock_bh(&se_tpg->acl_node_lock); 273 spin_unlock_irq(&se_tpg->acl_node_lock);
274 return found; 274 return found;
275} 275}
276 276
@@ -327,7 +327,6 @@ static struct se_portal_group *ft_add_tpg(
327 tpg->index = index; 327 tpg->index = index;
328 tpg->lport_acl = lacl; 328 tpg->lport_acl = lacl;
329 INIT_LIST_HEAD(&tpg->lun_list); 329 INIT_LIST_HEAD(&tpg->lun_list);
330 transport_init_queue_obj(&tpg->qobj);
331 330
332 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, 331 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
333 tpg, TRANSPORT_TPG_TYPE_NORMAL); 332 tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -336,8 +335,8 @@ static struct se_portal_group *ft_add_tpg(
336 return NULL; 335 return NULL;
337 } 336 }
338 337
339 tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index); 338 tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
340 if (IS_ERR(tpg->thread)) { 339 if (!tpg->workqueue) {
341 kfree(tpg); 340 kfree(tpg);
342 return NULL; 341 return NULL;
343 } 342 }
@@ -356,7 +355,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
356 pr_debug("del tpg %s\n", 355 pr_debug("del tpg %s\n",
357 config_item_name(&tpg->se_tpg.tpg_group.cg_item)); 356 config_item_name(&tpg->se_tpg.tpg_group.cg_item));
358 357
359 kthread_stop(tpg->thread); 358 destroy_workqueue(tpg->workqueue);
360 359
361 /* Wait for sessions to be freed thru RCU, for BUG_ON below */ 360 /* Wait for sessions to be freed thru RCU, for BUG_ON below */
362 synchronize_rcu(); 361 synchronize_rcu();
@@ -655,9 +654,7 @@ static void __exit ft_exit(void)
655 synchronize_rcu(); 654 synchronize_rcu();
656} 655}
657 656
658#ifdef MODULE
659MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); 657MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
660MODULE_LICENSE("GPL"); 658MODULE_LICENSE("GPL");
661module_init(ft_init); 659module_init(ft_init);
662module_exit(ft_exit); 660module_exit(ft_exit);
663#endif /* MODULE */
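
In tfc_conf.c the per-tpg kthread is replaced by a single-threaded workqueue whose lifetime follows the tpg: created in ft_add_tpg(), destroyed in ft_del_tpg(). A sketch of that pairing, assuming a hypothetical my_tpg container:

    #include <linux/workqueue.h>

    struct my_tpg {
            struct workqueue_struct *workqueue;
    };

    static int my_add_tpg(struct my_tpg *tpg)
    {
            /* max_active = 1 preserves per-tpg command ordering */
            tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
            if (!tpg->workqueue)
                    return -ENOMEM;
            return 0;
    }

    static void my_del_tpg(struct my_tpg *tpg)
    {
            /* waits for queued work to finish, then releases the workqueue */
            destroy_workqueue(tpg->workqueue);
    }

The same file also switches acl_node_lock to spin_lock_irq()/spin_unlock_irq(), presumably to match how the target core now takes that lock, and drops the #ifdef MODULE guard: module_init()/module_exit() and the MODULE_* macros already do the right thing for built-in code, so the guard was unnecessary.
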
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index c37f4cd96452..d35ea5a3d56c 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -219,43 +219,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
219 if (cmd->was_ddp_setup) { 219 if (cmd->was_ddp_setup) {
220 BUG_ON(!ep); 220 BUG_ON(!ep);
221 BUG_ON(!lport); 221 BUG_ON(!lport);
222 } 222 /*
223 223 * Since DDP (Large Rx offload) was setup for this request,
224 /* 224 * payload is expected to be copied directly to user buffers.
225 * Doesn't expect payload if DDP is setup. Payload 225 */
226 * is expected to be copied directly to user buffers 226 buf = fc_frame_payload_get(fp, 1);
227 * due to DDP (Large Rx offload), 227 if (buf)
228 */ 228 pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
229 buf = fc_frame_payload_get(fp, 1);
230 if (buf)
231 pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
232 "cmd->sg_cnt 0x%x. DDP was setup" 229 "cmd->sg_cnt 0x%x. DDP was setup"
233 " hence not expected to receive frame with " 230 " hence not expected to receive frame with "
234 "payload, Frame will be dropped if " 231 "payload, Frame will be dropped if"
235 "'Sequence Initiative' bit in f_ctl is " 232 "'Sequence Initiative' bit in f_ctl is"
236 "not set\n", __func__, ep->xid, f_ctl, 233 "not set\n", __func__, ep->xid, f_ctl,
237 cmd->sg, cmd->sg_cnt); 234 cmd->sg, cmd->sg_cnt);
238 /* 235 /*
239 * Invalidate HW DDP context if it was setup for respective 236 * Invalidate HW DDP context if it was setup for respective
240 * command. Invalidation of HW DDP context is requited in both 237 * command. Invalidation of HW DDP context is requited in both
241 * situation (success and error). 238 * situation (success and error).
242 */ 239 */
243 ft_invl_hw_context(cmd); 240 ft_invl_hw_context(cmd);
244 241
245 /* 242 /*
246 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last 243 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
247 * write data frame is received successfully where payload is 244 * write data frame is received successfully where payload is
248 * posted directly to user buffer and only the last frame's 245 * posted directly to user buffer and only the last frame's
249 * header is posted in receive queue. 246 * header is posted in receive queue.
250 * 247 *
251 * If "Sequence Initiative (TSI)" bit is not set, means error 248 * If "Sequence Initiative (TSI)" bit is not set, means error
252 * condition w.r.t. DDP, hence drop the packet and let explict 249 * condition w.r.t. DDP, hence drop the packet and let explict
253 * ABORTS from other end of exchange timer trigger the recovery. 250 * ABORTS from other end of exchange timer trigger the recovery.
254 */ 251 */
255 if (f_ctl & FC_FC_SEQ_INIT) 252 if (f_ctl & FC_FC_SEQ_INIT)
256 goto last_frame; 253 goto last_frame;
257 else 254 else
258 goto drop; 255 goto drop;
256 }
259 257
260 rel_off = ntohl(fh->fh_parm_offset); 258 rel_off = ntohl(fh->fh_parm_offset);
261 frame_len = fr_len(fp); 259 frame_len = fr_len(fp);
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 98b6e3bdb000..e809e9d4683c 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -446,8 +446,19 @@ static inline void legacy_pty_init(void) { }
446int pty_limit = NR_UNIX98_PTY_DEFAULT; 446int pty_limit = NR_UNIX98_PTY_DEFAULT;
447static int pty_limit_min; 447static int pty_limit_min;
448static int pty_limit_max = NR_UNIX98_PTY_MAX; 448static int pty_limit_max = NR_UNIX98_PTY_MAX;
449static int tty_count;
449static int pty_count; 450static int pty_count;
450 451
452static inline void pty_inc_count(void)
453{
454 pty_count = (++tty_count) / 2;
455}
456
457static inline void pty_dec_count(void)
458{
459 pty_count = (--tty_count) / 2;
460}
461
451static struct cdev ptmx_cdev; 462static struct cdev ptmx_cdev;
452 463
453static struct ctl_table pty_table[] = { 464static struct ctl_table pty_table[] = {
@@ -542,6 +553,7 @@ static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
542 553
543static void pty_unix98_shutdown(struct tty_struct *tty) 554static void pty_unix98_shutdown(struct tty_struct *tty)
544{ 555{
556 tty_driver_remove_tty(tty->driver, tty);
545 /* We have our own method as we don't use the tty index */ 557 /* We have our own method as we don't use the tty index */
546 kfree(tty->termios); 558 kfree(tty->termios);
547} 559}
@@ -588,7 +600,8 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
588 */ 600 */
589 tty_driver_kref_get(driver); 601 tty_driver_kref_get(driver);
590 tty->count++; 602 tty->count++;
591 pty_count++; 603 pty_inc_count(); /* tty */
604 pty_inc_count(); /* tty->link */
592 return 0; 605 return 0;
593err_free_mem: 606err_free_mem:
594 deinitialize_tty_struct(o_tty); 607 deinitialize_tty_struct(o_tty);
@@ -602,7 +615,7 @@ err_free_tty:
602 615
603static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) 616static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
604{ 617{
605 pty_count--; 618 pty_dec_count();
606} 619}
607 620
608static const struct tty_operations ptm_unix98_ops = { 621static const struct tty_operations ptm_unix98_ops = {
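
The pty.c change counts tty_structs rather than ptys directly: pty_unix98_install() creates a master/slave pair, so it bumps the counter twice and the reported pty_count is derived as tty_count / 2. A self-contained userspace model of that accounting (illustrative only):

    #include <stdio.h>

    static int tty_count;
    static int pty_count;

    static void pty_inc_count(void) { pty_count = (++tty_count) / 2; }
    static void pty_dec_count(void) { pty_count = (--tty_count) / 2; }

    int main(void)
    {
            pty_inc_count();        /* tty */
            pty_inc_count();        /* tty->link */
            printf("ttys=%d ptys=%d\n", tty_count, pty_count);  /* ttys=2 ptys=1 */

            pty_dec_count();
            pty_dec_count();
            printf("ttys=%d ptys=%d\n", tty_count, pty_count);  /* ttys=0 ptys=0 */
            return 0;
    }
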
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index f2dfec82faf8..7f50999eebc2 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -1819,6 +1819,8 @@ static void serial8250_backup_timeout(unsigned long data)
1819 unsigned int iir, ier = 0, lsr; 1819 unsigned int iir, ier = 0, lsr;
1820 unsigned long flags; 1820 unsigned long flags;
1821 1821
1822 spin_lock_irqsave(&up->port.lock, flags);
1823
1822 /* 1824 /*
1823 * Must disable interrupts or else we risk racing with the interrupt 1825 * Must disable interrupts or else we risk racing with the interrupt
1824 * based handler. 1826 * based handler.
@@ -1836,10 +1838,8 @@ static void serial8250_backup_timeout(unsigned long data)
1836 * the "Diva" UART used on the management processor on many HP 1838 * the "Diva" UART used on the management processor on many HP
1837 * ia64 and parisc boxes. 1839 * ia64 and parisc boxes.
1838 */ 1840 */
1839 spin_lock_irqsave(&up->port.lock, flags);
1840 lsr = serial_in(up, UART_LSR); 1841 lsr = serial_in(up, UART_LSR);
1841 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; 1842 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
1842 spin_unlock_irqrestore(&up->port.lock, flags);
1843 if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) && 1843 if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
1844 (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) && 1844 (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
1845 (lsr & UART_LSR_THRE)) { 1845 (lsr & UART_LSR_THRE)) {
@@ -1848,11 +1848,13 @@ static void serial8250_backup_timeout(unsigned long data)
1848 } 1848 }
1849 1849
1850 if (!(iir & UART_IIR_NO_INT)) 1850 if (!(iir & UART_IIR_NO_INT))
1851 serial8250_handle_port(up); 1851 transmit_chars(up);
1852 1852
1853 if (is_real_interrupt(up->port.irq)) 1853 if (is_real_interrupt(up->port.irq))
1854 serial_out(up, UART_IER, ier); 1854 serial_out(up, UART_IER, ier);
1855 1855
1856 spin_unlock_irqrestore(&up->port.lock, flags);
1857
1856 /* Standard timer interval plus 0.2s to keep the port running */ 1858 /* Standard timer interval plus 0.2s to keep the port running */
1857 mod_timer(&up->timer, 1859 mod_timer(&up->timer,
1858 jiffies + uart_poll_timeout(&up->port) + HZ / 5); 1860 jiffies + uart_poll_timeout(&up->port) + HZ / 5);
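
The 8250.c hunk widens the port lock so the whole backup-timeout path, not just the LSR read, is serialized against the real interrupt handler; transmit_chars() is called directly, presumably because serial8250_handle_port() would take the same lock again. The general shape, with a hypothetical lock and the register accesses elided:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(port_lock);

    /* timer callback that can race with the interrupt handler: hold the
     * port lock across the whole register sequence, not just one read */
    static void backup_timeout(unsigned long data)
    {
            unsigned long flags;

            spin_lock_irqsave(&port_lock, flags);
            /* read IIR/LSR, kick the transmitter, restore IER ... */
            spin_unlock_irqrestore(&port_lock, flags);

            /* re-arm the timer outside the critical section */
    }
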
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 6b887d90a205..3abeca2a2a1b 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
@@ -1599,11 +1599,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
1599 .device = 0x800D, 1599 .device = 0x800D,
1600 .init = pci_eg20t_init, 1600 .init = pci_eg20t_init,
1601 }, 1601 },
1602 {
1603 .vendor = 0x10DB,
1604 .device = 0x800D,
1605 .init = pci_eg20t_init,
1606 },
1607 /* 1602 /*
1608 * Cronyx Omega PCI (PLX-chip based) 1603 * Cronyx Omega PCI (PLX-chip based)
1609 */ 1604 */
@@ -4021,7 +4016,7 @@ static struct pci_device_id serial_pci_tbl[] = {
4021 0, 0, pbn_NETMOS9900_2s_115200 }, 4016 0, 0, pbn_NETMOS9900_2s_115200 },
4022 4017
4023 /* 4018 /*
4024 * Best Connectivity PCI Multi I/O cards 4019 * Best Connectivity and Rosewill PCI Multi I/O cards
4025 */ 4020 */
4026 4021
4027 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 4022 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
@@ -4029,6 +4024,10 @@ static struct pci_device_id serial_pci_tbl[] = {
4029 0, 0, pbn_b0_1_115200 }, 4024 0, 0, pbn_b0_1_115200 },
4030 4025
4031 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 4026 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
4027 0xA000, 0x3002,
4028 0, 0, pbn_b0_bt_2_115200 },
4029
4030 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
4032 0xA000, 0x3004, 4031 0xA000, 0x3004,
4033 0, 0, pbn_b0_bt_4_115200 }, 4032 0, 0, pbn_b0_bt_4_115200 },
4034 /* Intel CE4100 */ 4033 /* Intel CE4100 */
diff --git a/drivers/tty/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c
index fc301f6722e1..a2f236510ff1 100644
--- a/drivers/tty/serial/8250_pnp.c
+++ b/drivers/tty/serial/8250_pnp.c
@@ -109,6 +109,9 @@ static const struct pnp_device_id pnp_dev_table[] = {
109 /* IBM */ 109 /* IBM */
110 /* IBM Thinkpad 701 Internal Modem Voice */ 110 /* IBM Thinkpad 701 Internal Modem Voice */
111 { "IBM0033", 0 }, 111 { "IBM0033", 0 },
112 /* Intermec */
113 /* Intermec CV60 touchscreen port */
114 { "PNP4972", 0 },
112 /* Intertex */ 115 /* Intertex */
113 /* Intertex 28k8 33k6 Voice EXT PnP */ 116 /* Intertex 28k8 33k6 Voice EXT PnP */
114 { "IXDC801", 0 }, 117 { "IXDC801", 0 },
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index af9b7814965a..caba6730a943 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -46,7 +46,7 @@
46 46
47#ifdef CONFIG_ARM 47#ifdef CONFIG_ARM
48#include <mach/cpu.h> 48#include <mach/cpu.h>
49#include <mach/gpio.h> 49#include <asm/gpio.h>
50#endif 50#endif
51 51
52#define PDC_BUFFER_SIZE 512 52#define PDC_BUFFER_SIZE 512
@@ -1609,9 +1609,11 @@ static struct console atmel_console = {
1609static int __init atmel_console_init(void) 1609static int __init atmel_console_init(void)
1610{ 1610{
1611 if (atmel_default_console_device) { 1611 if (atmel_default_console_device) {
1612 add_preferred_console(ATMEL_DEVICENAME, 1612 struct atmel_uart_data *pdata =
1613 atmel_default_console_device->id, NULL); 1613 atmel_default_console_device->dev.platform_data;
1614 atmel_init_port(&atmel_ports[atmel_default_console_device->id], 1614
1615 add_preferred_console(ATMEL_DEVICENAME, pdata->num, NULL);
1616 atmel_init_port(&atmel_ports[pdata->num],
1615 atmel_default_console_device); 1617 atmel_default_console_device);
1616 register_console(&atmel_console); 1618 register_console(&atmel_console);
1617 } 1619 }
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 225123b37f19..58be715913cd 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -4450,7 +4450,7 @@ static int __init rs_init(void)
4450 4450
4451#if defined(CONFIG_ETRAX_RS485) 4451#if defined(CONFIG_ETRAX_RS485)
4452#if defined(CONFIG_ETRAX_RS485_ON_PA) 4452#if defined(CONFIG_ETRAX_RS485_ON_PA)
4453 if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit, 4453 if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit,
4454 rs485_pa_bit)) { 4454 rs485_pa_bit)) {
4455 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " 4455 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
4456 "RS485 pin\n"); 4456 "RS485 pin\n");
@@ -4459,7 +4459,7 @@ static int __init rs_init(void)
4459 } 4459 }
4460#endif 4460#endif
4461#if defined(CONFIG_ETRAX_RS485_ON_PORT_G) 4461#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
4462 if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit, 4462 if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,
4463 rs485_port_g_bit)) { 4463 rs485_port_g_bit)) {
4464 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " 4464 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
4465 "RS485 pin\n"); 4465 "RS485 pin\n");
diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
index a1fe304f2f52..d73aadd7a9ad 100644
--- a/drivers/tty/serial/max3107-aava.c
+++ b/drivers/tty/serial/max3107-aava.c
@@ -340,5 +340,5 @@ module_exit(max3107_exit);
340 340
341MODULE_DESCRIPTION("MAX3107 driver"); 341MODULE_DESCRIPTION("MAX3107 driver");
342MODULE_AUTHOR("Aavamobile"); 342MODULE_AUTHOR("Aavamobile");
343MODULE_ALIAS("aava-max3107-spi"); 343MODULE_ALIAS("spi:aava-max3107");
344MODULE_LICENSE("GPL v2"); 344MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 750b4f627315..a8164601c0ea 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -1209,5 +1209,5 @@ module_exit(max3107_exit);
1209 1209
1210MODULE_DESCRIPTION("MAX3107 driver"); 1210MODULE_DESCRIPTION("MAX3107 driver");
1211MODULE_AUTHOR("Aavamobile"); 1211MODULE_AUTHOR("Aavamobile");
1212MODULE_ALIAS("max3107-spi"); 1212MODULE_ALIAS("spi:max3107");
1213MODULE_LICENSE("GPL v2"); 1213MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index a764bf99743b..23bc743f2a22 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -917,4 +917,4 @@ module_init(serial_m3110_init);
917module_exit(serial_m3110_exit); 917module_exit(serial_m3110_exit);
918 918
919MODULE_LICENSE("GPL v2"); 919MODULE_LICENSE("GPL v2");
920MODULE_ALIAS("max3110-uart"); 920MODULE_ALIAS("spi:max3110-uart");
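
The three MODULE_ALIAS changes above add the "spi:" prefix that the SPI core uses when it reports a device to userspace (MODALIAS=spi:<name>), so udev can autoload the module. A sketch of an SPI driver with a matching alias, using hypothetical my_* names; the .name field is what the alias must mirror:

    #include <linux/module.h>
    #include <linux/spi/spi.h>

    static int my_probe(struct spi_device *spi)
    {
            return 0;
    }

    static struct spi_driver my_driver = {
            .driver = {
                    .name  = "max3107",     /* matched against the device's modalias */
                    .owner = THIS_MODULE,
            },
            .probe = my_probe,
    };

    static int __init my_init(void)
    {
            return spi_register_driver(&my_driver);
    }
    module_init(my_init);

    static void __exit my_exit(void)
    {
            spi_unregister_driver(&my_driver);
    }
    module_exit(my_exit);

    /* without the "spi:" prefix the uevent MODALIAS string never matches */
    MODULE_ALIAS("spi:max3107");
    MODULE_LICENSE("GPL v2");
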
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index c37df8d0fa28..5e713d3ef1f4 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -806,8 +806,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
806 806
807 serial_omap_set_mctrl(&up->port, up->port.mctrl); 807 serial_omap_set_mctrl(&up->port, up->port.mctrl);
808 /* Software Flow Control Configuration */ 808 /* Software Flow Control Configuration */
809 if (termios->c_iflag & (IXON | IXOFF)) 809 serial_omap_configure_xonxoff(up, termios);
810 serial_omap_configure_xonxoff(up, termios);
811 810
812 spin_unlock_irqrestore(&up->port.lock, flags); 811 spin_unlock_irqrestore(&up->port.lock, flags);
813 dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id); 812 dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 846dfcd3ce0d..b46218d679e2 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -598,7 +598,8 @@ static void pch_request_dma(struct uart_port *port)
598 dma_cap_zero(mask); 598 dma_cap_zero(mask);
599 dma_cap_set(DMA_SLAVE, mask); 599 dma_cap_set(DMA_SLAVE, mask);
600 600
601 dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(0xa, 0)); /* Get DMA's dev 601 dma_dev = pci_get_bus_and_slot(priv->pdev->bus->number,
602 PCI_DEVFN(0xa, 0)); /* Get DMA's dev
602 information */ 603 information */
603 /* Set Tx DMA */ 604 /* Set Tx DMA */
604 param = &priv->param_tx; 605 param = &priv->param_tx;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index afc629423152..6edafb5ace18 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1225,15 +1225,19 @@ static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
1225 .suspend = s3c24xx_serial_suspend, 1225 .suspend = s3c24xx_serial_suspend,
1226 .resume = s3c24xx_serial_resume, 1226 .resume = s3c24xx_serial_resume,
1227}; 1227};
1228#define SERIAL_SAMSUNG_PM_OPS (&s3c24xx_serial_pm_ops)
1229
1228#else /* !CONFIG_PM_SLEEP */ 1230#else /* !CONFIG_PM_SLEEP */
1229#define s3c24xx_serial_pm_ops NULL 1231
1232#define SERIAL_SAMSUNG_PM_OPS NULL
1230#endif /* CONFIG_PM_SLEEP */ 1233#endif /* CONFIG_PM_SLEEP */
1231 1234
1232int s3c24xx_serial_init(struct platform_driver *drv, 1235int s3c24xx_serial_init(struct platform_driver *drv,
1233 struct s3c24xx_uart_info *info) 1236 struct s3c24xx_uart_info *info)
1234{ 1237{
1235 dbg("s3c24xx_serial_init(%p,%p)\n", drv, info); 1238 dbg("s3c24xx_serial_init(%p,%p)\n", drv, info);
1236 drv->driver.pm = &s3c24xx_serial_pm_ops; 1239
1240 drv->driver.pm = SERIAL_SAMSUNG_PM_OPS;
1237 1241
1238 return platform_driver_register(drv); 1242 return platform_driver_register(drv);
1239} 1243}
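
The samsung.c hunk fixes the !CONFIG_PM_SLEEP build: the old code defined s3c24xx_serial_pm_ops as NULL and then took its address, which cannot compile, while the new code hides the choice behind a macro that is either &ops or a plain NULL. The pattern in isolation, with hypothetical my_* names:

    #include <linux/platform_device.h>
    #include <linux/pm.h>

    #ifdef CONFIG_PM_SLEEP
    static int my_suspend(struct device *dev) { return 0; }
    static int my_resume(struct device *dev)  { return 0; }

    static const struct dev_pm_ops my_pm_ops = {
            .suspend = my_suspend,
            .resume  = my_resume,
    };
    #define MY_PM_OPS (&my_pm_ops)
    #else
    /* no struct exists in this configuration, so hand the driver core a
     * NULL pointer rather than the address of an undefined symbol */
    #define MY_PM_OPS NULL
    #endif

    static struct platform_driver my_driver = {
            .driver = {
                    .name = "my-uart",
                    .pm   = MY_PM_OPS,
            },
    };
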
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index db7912cb7ae0..a3efbea5dbba 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -200,6 +200,11 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state, int in
200 clear_bit(TTY_IO_ERROR, &tty->flags); 200 clear_bit(TTY_IO_ERROR, &tty->flags);
201 } 201 }
202 202
203 /*
204 * This is to allow setserial on this port. People may want to set
205 * port/irq/type and then reconfigure the port properly if it failed
206 * now.
207 */
203 if (retval && capable(CAP_SYS_ADMIN)) 208 if (retval && capable(CAP_SYS_ADMIN))
204 retval = 0; 209 retval = 0;
205 210
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 2ec57b2fb278..5ea6ec3442e6 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -47,6 +47,7 @@
47#include <linux/ctype.h> 47#include <linux/ctype.h>
48#include <linux/err.h> 48#include <linux/err.h>
49#include <linux/dmaengine.h> 49#include <linux/dmaengine.h>
50#include <linux/dma-mapping.h>
50#include <linux/scatterlist.h> 51#include <linux/scatterlist.h>
51#include <linux/slab.h> 52#include <linux/slab.h>
52 53
@@ -95,6 +96,12 @@ struct sci_port {
95#endif 96#endif
96 97
97 struct notifier_block freq_transition; 98 struct notifier_block freq_transition;
99
100#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
101 unsigned short saved_smr;
102 unsigned short saved_fcr;
103 unsigned char saved_brr;
104#endif
98}; 105};
99 106
100/* Function prototypes */ 107/* Function prototypes */
@@ -1076,7 +1083,7 @@ static unsigned int sci_get_mctrl(struct uart_port *port)
1076 /* This routine is used for getting signals of: DTR, DCD, DSR, RI, 1083 /* This routine is used for getting signals of: DTR, DCD, DSR, RI,
1077 and CTS/RTS */ 1084 and CTS/RTS */
1078 1085
1079 return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; 1086 return TIOCM_DTR | TIOCM_RTS | TIOCM_CTS | TIOCM_DSR;
1080} 1087}
1081 1088
1082#ifdef CONFIG_SERIAL_SH_SCI_DMA 1089#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -1633,11 +1640,25 @@ static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
1633 return ((freq + 16 * bps) / (32 * bps) - 1); 1640 return ((freq + 16 * bps) / (32 * bps) - 1);
1634} 1641}
1635 1642
1643static void sci_reset(struct uart_port *port)
1644{
1645 unsigned int status;
1646
1647 do {
1648 status = sci_in(port, SCxSR);
1649 } while (!(status & SCxSR_TEND(port)));
1650
1651 sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
1652
1653 if (port->type != PORT_SCI)
1654 sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
1655}
1656
1636static void sci_set_termios(struct uart_port *port, struct ktermios *termios, 1657static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1637 struct ktermios *old) 1658 struct ktermios *old)
1638{ 1659{
1639 struct sci_port *s = to_sci_port(port); 1660 struct sci_port *s = to_sci_port(port);
1640 unsigned int status, baud, smr_val, max_baud; 1661 unsigned int baud, smr_val, max_baud;
1641 int t = -1; 1662 int t = -1;
1642 u16 scfcr = 0; 1663 u16 scfcr = 0;
1643 1664
@@ -1657,14 +1678,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1657 1678
1658 sci_port_enable(s); 1679 sci_port_enable(s);
1659 1680
1660 do { 1681 sci_reset(port);
1661 status = sci_in(port, SCxSR);
1662 } while (!(status & SCxSR_TEND(port)));
1663
1664 sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
1665
1666 if (port->type != PORT_SCI)
1667 sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
1668 1682
1669 smr_val = sci_in(port, SCSMR) & 3; 1683 smr_val = sci_in(port, SCSMR) & 3;
1670 1684
@@ -1913,6 +1927,7 @@ static int __devinit sci_init_single(struct platform_device *dev,
1913 1927
1914 port->dev = &dev->dev; 1928 port->dev = &dev->dev;
1915 1929
1930 pm_runtime_irq_safe(&dev->dev);
1916 pm_runtime_enable(&dev->dev); 1931 pm_runtime_enable(&dev->dev);
1917 } 1932 }
1918 1933
@@ -2036,7 +2051,8 @@ static int __devinit serial_console_setup(struct console *co, char *options)
2036 if (options) 2051 if (options)
2037 uart_parse_options(options, &baud, &parity, &bits, &flow); 2052 uart_parse_options(options, &baud, &parity, &bits, &flow);
2038 2053
2039 /* TODO: disable clock */ 2054 sci_port_disable(sci_port);
2055
2040 return uart_set_options(port, co, baud, parity, bits, flow); 2056 return uart_set_options(port, co, baud, parity, bits, flow);
2041} 2057}
2042 2058
@@ -2079,6 +2095,36 @@ static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
2079 return 0; 2095 return 0;
2080} 2096}
2081 2097
2098#define uart_console(port) ((port)->cons->index == (port)->line)
2099
2100static int sci_runtime_suspend(struct device *dev)
2101{
2102 struct sci_port *sci_port = dev_get_drvdata(dev);
2103 struct uart_port *port = &sci_port->port;
2104
2105 if (uart_console(port)) {
2106 sci_port->saved_smr = sci_in(port, SCSMR);
2107 sci_port->saved_brr = sci_in(port, SCBRR);
2108 sci_port->saved_fcr = sci_in(port, SCFCR);
2109 }
2110 return 0;
2111}
2112
2113static int sci_runtime_resume(struct device *dev)
2114{
2115 struct sci_port *sci_port = dev_get_drvdata(dev);
2116 struct uart_port *port = &sci_port->port;
2117
2118 if (uart_console(port)) {
2119 sci_reset(port);
2120 sci_out(port, SCSMR, sci_port->saved_smr);
2121 sci_out(port, SCBRR, sci_port->saved_brr);
2122 sci_out(port, SCFCR, sci_port->saved_fcr);
2123 sci_out(port, SCSCR, sci_port->cfg->scscr);
2124 }
2125 return 0;
2126}
2127
2082#define SCI_CONSOLE (&serial_console) 2128#define SCI_CONSOLE (&serial_console)
2083 2129
2084#else 2130#else
@@ -2088,6 +2134,8 @@ static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
2088} 2134}
2089 2135
2090#define SCI_CONSOLE NULL 2136#define SCI_CONSOLE NULL
2137#define sci_runtime_suspend NULL
2138#define sci_runtime_resume NULL
2091 2139
2092#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ 2140#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
2093 2141
@@ -2203,6 +2251,8 @@ static int sci_resume(struct device *dev)
2203} 2251}
2204 2252
2205static const struct dev_pm_ops sci_dev_pm_ops = { 2253static const struct dev_pm_ops sci_dev_pm_ops = {
2254 .runtime_suspend = sci_runtime_suspend,
2255 .runtime_resume = sci_runtime_resume,
2206 .suspend = sci_suspend, 2256 .suspend = sci_suspend,
2207 .resume = sci_resume, 2257 .resume = sci_resume,
2208}; 2258};
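
sh-sci.c gains runtime PM callbacks that save SCSMR/SCBRR/SCFCR for the console port before the domain powers down and restore them on resume, plus pm_runtime_irq_safe() so the runtime PM get/put calls may be made from atomic context. The callback wiring reduced to a sketch (my_* names are placeholders, register accesses elided):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int my_runtime_suspend(struct device *dev)
    {
            /* save registers that the power domain will lose */
            return 0;
    }

    static int my_runtime_resume(struct device *dev)
    {
            /* reset the block, then restore the saved registers */
            return 0;
    }

    static const struct dev_pm_ops my_pm_ops = {
            .runtime_suspend = my_runtime_suspend,
            .runtime_resume  = my_runtime_resume,
            /* system sleep .suspend/.resume go here as well */
    };

    static void my_probe_tail(struct device *dev)
    {
            pm_runtime_irq_safe(dev);       /* callbacks usable with IRQs off */
            pm_runtime_enable(dev);
    }
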
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index c327218cad44..9af9f0879a24 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -235,7 +235,7 @@ static inline void *qe2cpu_addr(dma_addr_t addr, struct uart_qe_port *qe_port)
235 return qe_port->bd_virt + (addr - qe_port->bd_dma_addr); 235 return qe_port->bd_virt + (addr - qe_port->bd_dma_addr);
236 236
237 /* something nasty happened */ 237 /* something nasty happened */
238 printk(KERN_ERR "%s: addr=%x\n", __func__, addr); 238 printk(KERN_ERR "%s: addr=%llx\n", __func__, (u64)addr);
239 BUG(); 239 BUG();
240 return NULL; 240 return NULL;
241} 241}
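
The ucc_uart.c fix addresses a format-string problem: dma_addr_t can be 32 or 64 bits depending on the configuration, so it is cast to a 64-bit type and printed with %llx. The same idiom in isolation:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void report_bad_addr(dma_addr_t addr)
    {
            /* the cast keeps the format correct whether dma_addr_t is 32 or 64 bit */
            printk(KERN_ERR "%s: addr=%llx\n", __func__,
                   (unsigned long long)addr);
    }
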
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 150e4f747c7d..4f1fc81112e6 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1295,8 +1295,7 @@ static int tty_driver_install_tty(struct tty_driver *driver,
1295 * 1295 *
1296 * Locking: tty_mutex for now 1296 * Locking: tty_mutex for now
1297 */ 1297 */
1298static void tty_driver_remove_tty(struct tty_driver *driver, 1298void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
1299 struct tty_struct *tty)
1300{ 1299{
1301 if (driver->ops->remove) 1300 if (driver->ops->remove)
1302 driver->ops->remove(driver, tty); 1301 driver->ops->remove(driver, tty);
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 385acb895ab3..3f94ac34dce3 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -268,7 +268,7 @@ usbtmc_abort_bulk_in_status:
268 dev_err(dev, "usb_bulk_msg returned %d\n", rv); 268 dev_err(dev, "usb_bulk_msg returned %d\n", rv);
269 goto exit; 269 goto exit;
270 } 270 }
271 } while ((actual = max_size) && 271 } while ((actual == max_size) &&
272 (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN)); 272 (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
273 273
274 if (actual == max_size) { 274 if (actual == max_size) {
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index c962608b4b9a..26678cadfb21 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -123,10 +123,11 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
123 } 123 }
124 124
125 if (usb_endpoint_xfer_isoc(&ep->desc)) 125 if (usb_endpoint_xfer_isoc(&ep->desc))
126 max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1) * 126 max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
127 (desc->bmAttributes + 1); 127 le16_to_cpu(ep->desc.wMaxPacketSize);
128 else if (usb_endpoint_xfer_int(&ep->desc)) 128 else if (usb_endpoint_xfer_int(&ep->desc))
129 max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1); 129 max_tx = le16_to_cpu(ep->desc.wMaxPacketSize) *
130 (desc->bMaxBurst + 1);
130 else 131 else
131 max_tx = 999999; 132 max_tx = 999999;
132 if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) { 133 if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) {
@@ -134,10 +135,10 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
134 "config %d interface %d altsetting %d ep %d: " 135 "config %d interface %d altsetting %d ep %d: "
135 "setting to %d\n", 136 "setting to %d\n",
136 usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int", 137 usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int",
137 desc->wBytesPerInterval, 138 le16_to_cpu(desc->wBytesPerInterval),
138 cfgno, inum, asnum, ep->desc.bEndpointAddress, 139 cfgno, inum, asnum, ep->desc.bEndpointAddress,
139 max_tx); 140 max_tx);
140 ep->ss_ep_comp.wBytesPerInterval = max_tx; 141 ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx);
141 } 142 }
142} 143}
143 144
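
The config.c hunk converts wMaxPacketSize, wBytesPerInterval and the computed limit with le16_to_cpu()/cpu_to_le16(): multi-byte descriptor fields are little-endian on the wire, so without the conversions a big-endian host computes a different maximum payload than a little-endian one. A sketch of the corrected isoc calculation, assuming the standard ch9 descriptor structs:

    #include <linux/usb/ch9.h>
    #include <asm/byteorder.h>

    static unsigned int ss_isoc_max_tx(const struct usb_endpoint_descriptor *ep,
                                       const struct usb_ss_ep_comp_descriptor *comp)
    {
            /* bMaxBurst and bmAttributes are single bytes; only the
             * 16-bit packet size needs byte-swapping on big-endian hosts */
            return (comp->bMaxBurst + 1) * (comp->bmAttributes + 1) *
                    le16_to_cpu(ep->wMaxPacketSize);
    }
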
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 8669ba3fe794..73cbbd85219f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1775,6 +1775,8 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev,
1775 struct usb_interface *iface = usb_ifnum_to_if(udev, 1775 struct usb_interface *iface = usb_ifnum_to_if(udev,
1776 cur_alt->desc.bInterfaceNumber); 1776 cur_alt->desc.bInterfaceNumber);
1777 1777
1778 if (!iface)
1779 return -EINVAL;
1778 if (iface->resetting_device) { 1780 if (iface->resetting_device) {
1779 /* 1781 /*
1780 * The USB core just reset the device, so the xHCI host 1782 * The USB core just reset the device, so the xHCI host
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 44b6b40aafb4..5a084b9cfa3c 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -310,7 +310,7 @@ config USB_PXA_U2O
310# musb builds in ../musb along with host support 310# musb builds in ../musb along with host support
311config USB_GADGET_MUSB_HDRC 311config USB_GADGET_MUSB_HDRC
312 tristate "Inventra HDRC USB Peripheral (TI, ADI, ...)" 312 tristate "Inventra HDRC USB Peripheral (TI, ADI, ...)"
313 depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG) 313 depends on USB_MUSB_HDRC
314 select USB_GADGET_DUALSPEED 314 select USB_GADGET_DUALSPEED
315 help 315 help
316 This OTG-capable silicon IP is used in dual designs including 316 This OTG-capable silicon IP is used in dual designs including
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 98cbc06c30fd..ddb118a76807 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -35,6 +35,7 @@
35#include <linux/list.h> 35#include <linux/list.h>
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
38#include <linux/prefetch.h>
38#include <linux/clk.h> 39#include <linux/clk.h>
39#include <linux/usb/ch9.h> 40#include <linux/usb/ch9.h>
40#include <linux/usb/gadget.h> 41#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 5ef87794fd32..aef47414f5d5 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1079,10 +1079,12 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1079 cdev->desc.bMaxPacketSize0 = 1079 cdev->desc.bMaxPacketSize0 =
1080 cdev->gadget->ep0->maxpacket; 1080 cdev->gadget->ep0->maxpacket;
1081 if (gadget_is_superspeed(gadget)) { 1081 if (gadget_is_superspeed(gadget)) {
1082 if (gadget->speed >= USB_SPEED_SUPER) 1082 if (gadget->speed >= USB_SPEED_SUPER) {
1083 cdev->desc.bcdUSB = cpu_to_le16(0x0300); 1083 cdev->desc.bcdUSB = cpu_to_le16(0x0300);
1084 else 1084 cdev->desc.bMaxPacketSize0 = 9;
1085 } else {
1085 cdev->desc.bcdUSB = cpu_to_le16(0x0210); 1086 cdev->desc.bcdUSB = cpu_to_le16(0x0210);
1087 }
1086 } 1088 }
1087 1089
1088 value = min(w_length, (u16) sizeof cdev->desc); 1090 value = min(w_length, (u16) sizeof cdev->desc);
diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c
index 403a48bcf560..83a266bdb40e 100644
--- a/drivers/usb/gadget/f_hid.c
+++ b/drivers/usb/gadget/f_hid.c
@@ -367,6 +367,13 @@ static int hidg_setup(struct usb_function *f,
367 case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8 367 case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
368 | USB_REQ_GET_DESCRIPTOR): 368 | USB_REQ_GET_DESCRIPTOR):
369 switch (value >> 8) { 369 switch (value >> 8) {
370 case HID_DT_HID:
371 VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
372 length = min_t(unsigned short, length,
373 hidg_desc.bLength);
374 memcpy(req->buf, &hidg_desc, length);
375 goto respond;
376 break;
370 case HID_DT_REPORT: 377 case HID_DT_REPORT:
371 VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n"); 378 VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n");
372 length = min_t(unsigned short, length, 379 length = min_t(unsigned short, length,
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 8f8d3f6cd89e..8f3eab1af885 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -434,6 +434,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
434 config_ep_by_speed(gadget, f, fp->out_ep)) { 434 config_ep_by_speed(gadget, f, fp->out_ep)) {
435 fp->in_ep->desc = NULL; 435 fp->in_ep->desc = NULL;
436 fp->out_ep->desc = NULL; 436 fp->out_ep->desc = NULL;
437 spin_unlock(&port->lock);
437 return -EINVAL; 438 return -EINVAL;
438 } 439 }
439 usb_ep_enable(fp->out_ep); 440 usb_ep_enable(fp->out_ep);
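
The f_phonet.c fix releases port->lock on the config_ep_by_speed() failure path; before it, the function returned -EINVAL with the spinlock still held. A common way to keep lock balance obvious is a single unlock point, sketched here with a hypothetical flag standing in for the failed configuration check:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(port_lock);

    static int my_set_alt(int config_failed)
    {
            int err = 0;

            spin_lock(&port_lock);
            if (config_failed) {
                    err = -EINVAL;
                    goto out;       /* never return with the lock held */
            }
            /* ... enable endpoints under the lock ... */
    out:
            spin_unlock(&port_lock);
            return err;
    }
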
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index 24a924330c81..4ec888f90002 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -609,107 +609,6 @@ void fusb300_rdcxf(struct fusb300 *fusb300,
609 } 609 }
610} 610}
611 611
612#if 0
613static void fusb300_dbg_fifo(struct fusb300_ep *ep,
614 u8 entry, u16 length)
615{
616 u32 reg;
617 u32 i = 0;
618 u32 j = 0;
619
620 reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM);
621 reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) |
622 FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG);
623 reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) |
624 FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG);
625 iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM);
626
627 for (i = 0; i < (length >> 2); i++) {
628 if (i * 4 == 1024)
629 break;
630 reg = ioread32(ep->fusb300->reg +
631 FUSB300_OFFSET_BUFDBG_START + i * 4);
632 printk(KERN_DEBUG" 0x%-8x", reg);
633 j++;
634 if ((j % 4) == 0)
635 printk(KERN_DEBUG "\n");
636 }
637
638 if (length % 4) {
639 reg = ioread32(ep->fusb300->reg +
640 FUSB300_OFFSET_BUFDBG_START + i * 4);
641 printk(KERN_DEBUG " 0x%x\n", reg);
642 }
643
644 if ((j % 4) != 0)
645 printk(KERN_DEBUG "\n");
646
647 fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM,
648 FUSB300_GTM_TST_FIFO_DEG);
649}
650
651static void fusb300_cmp_dbg_fifo(struct fusb300_ep *ep,
652 u8 entry, u16 length, u8 *golden)
653{
654 u32 reg;
655 u32 i = 0;
656 u32 golden_value;
657 u8 *tmp;
658
659 tmp = golden;
660
661 printk(KERN_DEBUG "fusb300_cmp_dbg_fifo (entry %d) : start\n", entry);
662
663 reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM);
664 reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) |
665 FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG);
666 reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) |
667 FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG);
668 iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM);
669
670 for (i = 0; i < (length >> 2); i++) {
671 if (i * 4 == 1024)
672 break;
673 golden_value = *tmp | *(tmp + 1) << 8 |
674 *(tmp + 2) << 16 | *(tmp + 3) << 24;
675
676 reg = ioread32(ep->fusb300->reg +
677 FUSB300_OFFSET_BUFDBG_START + i*4);
678
679 if (reg != golden_value) {
680 printk(KERN_DEBUG "0x%x : ", (u32)(ep->fusb300->reg +
681 FUSB300_OFFSET_BUFDBG_START + i*4));
682 printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n",
683 golden_value, reg);
684 }
685 tmp += 4;
686 }
687
688 switch (length % 4) {
689 case 1:
690 golden_value = *tmp;
691 case 2:
692 golden_value = *tmp | *(tmp + 1) << 8;
693 case 3:
694 golden_value = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16;
695 default:
696 break;
697
698 reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4);
699 if (reg != golden_value) {
700 printk(KERN_DEBUG "0x%x:", (u32)(ep->fusb300->reg +
701 FUSB300_OFFSET_BUFDBG_START + i*4));
702 printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n",
703 golden_value, reg);
704 }
705 }
706
707 printk(KERN_DEBUG "fusb300_cmp_dbg_fifo : end\n");
708 fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM,
709 FUSB300_GTM_TST_FIFO_DEG);
710}
711#endif
712
713static void fusb300_rdfifo(struct fusb300_ep *ep, 612static void fusb300_rdfifo(struct fusb300_ep *ep,
714 struct fusb300_request *req, 613 struct fusb300_request *req,
715 u32 length) 614 u32 length)
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
index 7c7b0e120d88..ab98ea926a11 100644
--- a/drivers/usb/gadget/net2272.c
+++ b/drivers/usb/gadget/net2272.c
@@ -27,13 +27,13 @@
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/ioport.h> 29#include <linux/ioport.h>
30#include <linux/irq.h>
31#include <linux/kernel.h> 30#include <linux/kernel.h>
32#include <linux/list.h> 31#include <linux/list.h>
33#include <linux/module.h> 32#include <linux/module.h>
34#include <linux/moduleparam.h> 33#include <linux/moduleparam.h>
35#include <linux/pci.h> 34#include <linux/pci.h>
36#include <linux/platform_device.h> 35#include <linux/platform_device.h>
36#include <linux/prefetch.h>
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/timer.h> 39#include <linux/timer.h>
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 85c1b0d66293..8d31848aab09 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -2060,6 +2060,7 @@ static int s3c2410_udc_resume(struct platform_device *pdev)
2060static const struct platform_device_id s3c_udc_ids[] = { 2060static const struct platform_device_id s3c_udc_ids[] = {
2061 { "s3c2410-usbgadget", }, 2061 { "s3c2410-usbgadget", },
2062 { "s3c2440-usbgadget", }, 2062 { "s3c2440-usbgadget", },
2063 { }
2063}; 2064};
2064MODULE_DEVICE_TABLE(platform, s3c_udc_ids); 2065MODULE_DEVICE_TABLE(platform, s3c_udc_ids);
2065 2066
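
The s3c2410_udc.c change adds the terminating entry to the platform_device_id table; the platform bus walks the table until it hits an entry with an empty name, so a missing sentinel lets the match loop read past the end of the array. The complete shape of such a table, with a hypothetical my_udc_ids:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static const struct platform_device_id my_udc_ids[] = {
            { "s3c2410-usbgadget", },
            { "s3c2440-usbgadget", },
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(platform, my_udc_ids);
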
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index bf2c8f65e1ae..4c32cb19b405 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -343,7 +343,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
343 u32 temp; 343 u32 temp;
344 u32 power_okay; 344 u32 power_okay;
345 int i; 345 int i;
346 u8 resume_needed = 0; 346 unsigned long resume_needed = 0;
347 347
348 if (time_before (jiffies, ehci->next_statechange)) 348 if (time_before (jiffies, ehci->next_statechange))
349 msleep(5); 349 msleep(5);
@@ -416,7 +416,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
416 if (test_bit(i, &ehci->bus_suspended) && 416 if (test_bit(i, &ehci->bus_suspended) &&
417 (temp & PORT_SUSPEND)) { 417 (temp & PORT_SUSPEND)) {
418 temp |= PORT_RESUME; 418 temp |= PORT_RESUME;
419 resume_needed = 1; 419 set_bit(i, &resume_needed);
420 } 420 }
421 ehci_writel(ehci, temp, &ehci->regs->port_status [i]); 421 ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
422 } 422 }
@@ -431,8 +431,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
431 i = HCS_N_PORTS (ehci->hcs_params); 431 i = HCS_N_PORTS (ehci->hcs_params);
432 while (i--) { 432 while (i--) {
433 temp = ehci_readl(ehci, &ehci->regs->port_status [i]); 433 temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
434 if (test_bit(i, &ehci->bus_suspended) && 434 if (test_bit(i, &resume_needed)) {
435 (temp & PORT_SUSPEND)) {
436 temp &= ~(PORT_RWC_BITS | PORT_RESUME); 435 temp &= ~(PORT_RWC_BITS | PORT_RESUME);
437 ehci_writel(ehci, temp, &ehci->regs->port_status [i]); 436 ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
438 ehci_vdbg (ehci, "resumed port %d\n", i + 1); 437 ehci_vdbg (ehci, "resumed port %d\n", i + 1);
@@ -1046,7 +1045,19 @@ static int ehci_hub_control (
1046 if (!selector || selector > 5) 1045 if (!selector || selector > 5)
1047 goto error; 1046 goto error;
1048 ehci_quiesce(ehci); 1047 ehci_quiesce(ehci);
1048
1049 /* Put all enabled ports into suspend */
1050 while (ports--) {
1051 u32 __iomem *sreg =
1052 &ehci->regs->port_status[ports];
1053
1054 temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS;
1055 if (temp & PORT_PE)
1056 ehci_writel(ehci, temp | PORT_SUSPEND,
1057 sreg);
1058 }
1049 ehci_halt(ehci); 1059 ehci_halt(ehci);
1060 temp = ehci_readl(ehci, status_reg);
1050 temp |= selector << 16; 1061 temp |= selector << 16;
1051 ehci_writel(ehci, temp, status_reg); 1062 ehci_writel(ehci, temp, status_reg);
1052 break; 1063 break;
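
ehci-hub.c turns resume_needed from a single flag into an unsigned long bitmap so the second pass only finishes resume on the ports that were actually put into resume in the first pass. The bitmap idiom reduced to the two passes, with the port register writes elided:

    #include <linux/bitops.h>

    static void finish_bus_resume(unsigned long bus_suspended, unsigned int nports)
    {
            unsigned long resume_needed = 0;
            unsigned int i;

            /* pass 1: start resume signalling, remember which ports */
            for (i = 0; i < nports; i++) {
                    if (test_bit(i, &bus_suspended)) {
                            /* set PORT_RESUME in the port status register */
                            set_bit(i, &resume_needed);
                    }
            }

            /* ... wait the mandated ~20 ms resume time ... */

            /* pass 2: clear PORT_RESUME only on the recorded ports */
            for (i = 0; i < nports; i++) {
                    if (test_bit(i, &resume_needed)) {
                            /* clear PORT_RESUME for port i */
                    }
            }
    }
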
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index 0c058be35a38..555a73c864b5 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -24,6 +24,7 @@
24#include <linux/usb/ulpi.h> 24#include <linux/usb/ulpi.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26 26
27#include <mach/hardware.h>
27#include <mach/mxc_ehci.h> 28#include <mach/mxc_ehci.h>
28 29
29#include <asm/mach-types.h> 30#include <asm/mach-types.h>
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 55a57c23dd0f..45240321ca09 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -98,6 +98,18 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
98 } 98 }
99} 99}
100 100
101static void disable_put_regulator(
102 struct ehci_hcd_omap_platform_data *pdata)
103{
104 int i;
105
106 for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
107 if (pdata->regulator[i]) {
108 regulator_disable(pdata->regulator[i]);
109 regulator_put(pdata->regulator[i]);
110 }
111 }
112}
101 113
102/* configure so an HC device and id are always provided */ 114/* configure so an HC device and id are always provided */
103/* always called with process context; sleeping is OK */ 115/* always called with process context; sleeping is OK */
@@ -231,9 +243,11 @@ err_add_hcd:
231 omap_usbhs_disable(dev); 243 omap_usbhs_disable(dev);
232 244
233err_enable: 245err_enable:
246 disable_put_regulator(pdata);
234 usb_put_hcd(hcd); 247 usb_put_hcd(hcd);
235 248
236err_io: 249err_io:
250 iounmap(regs);
237 return ret; 251 return ret;
238} 252}
239 253
@@ -253,6 +267,8 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
253 267
254 usb_remove_hcd(hcd); 268 usb_remove_hcd(hcd);
255 omap_usbhs_disable(dev); 269 omap_usbhs_disable(dev);
270 disable_put_regulator(dev->platform_data);
271 iounmap(hcd->regs);
256 usb_put_hcd(hcd); 272 usb_put_hcd(hcd);
257 return 0; 273 return 0;
258} 274}
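
The ehci-omap.c hunks add the missing cleanup: regulators acquired and enabled earlier in probe (not shown in this hunk) are now disabled and released on both the probe error path and in remove(), and the ioremapped register window gets its iounmap(). A sketch of the shared helper, with an assumed array layout rather than the driver's exact platform data:

    #include <linux/regulator/consumer.h>

    #define MY_USB_PORTS 3

    static void disable_put_regulators(struct regulator *reg[MY_USB_PORTS])
    {
            int i;

            /* undo regulator_enable() + regulator_get() for each port */
            for (i = 0; i < MY_USB_PORTS; i++) {
                    if (reg[i]) {
                            regulator_disable(reg[i]);
                            regulator_put(reg[i]);
                    }
            }
    }
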
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
index b3958b3d3163..9e77f1c8bdbd 100644
--- a/drivers/usb/host/ehci-s5p.c
+++ b/drivers/usb/host/ehci-s5p.c
@@ -86,6 +86,7 @@ static int __devinit s5p_ehci_probe(struct platform_device *pdev)
86 goto fail_hcd; 86 goto fail_hcd;
87 } 87 }
88 88
89 s5p_ehci->hcd = hcd;
89 s5p_ehci->clk = clk_get(&pdev->dev, "usbhost"); 90 s5p_ehci->clk = clk_get(&pdev->dev, "usbhost");
90 91
91 if (IS_ERR(s5p_ehci->clk)) { 92 if (IS_ERR(s5p_ehci->clk)) {
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 55d3d5859ac5..840beda66dd9 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1583,6 +1583,9 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1583 int retval = 0; 1583 int retval = 0;
1584 1584
1585 spin_lock_irqsave(&priv->lock, spinflags); 1585 spin_lock_irqsave(&priv->lock, spinflags);
1586 retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1587 if (retval)
1588 goto out;
1586 1589
1587 qh = urb->ep->hcpriv; 1590 qh = urb->ep->hcpriv;
1588 if (!qh) { 1591 if (!qh) {
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 653d6a60edb5..d1b41933199e 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -26,7 +26,7 @@
26 26
27#include <mach/platform.h> 27#include <mach/platform.h>
28#include <mach/irqs.h> 28#include <mach/irqs.h>
29#include <mach/gpio.h> 29#include <asm/gpio.h>
30 30
31#define USB_CTRL IO_ADDRESS(PNX4008_PWRMAN_BASE + 0x64) 31#define USB_CTRL IO_ADDRESS(PNX4008_PWRMAN_BASE + 0x64)
32 32
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index a9d315906e3d..629a96813fd6 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -535,7 +535,7 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
535 iounmap(base); 535 iounmap(base);
536} 536}
537 537
538static const struct dmi_system_id __initconst ehci_dmi_nohandoff_table[] = { 538static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
539 { 539 {
540 /* Pegatron Lucid (ExoPC) */ 540 /* Pegatron Lucid (ExoPC) */
541 .matches = { 541 .matches = {
@@ -817,7 +817,7 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
817 817
818 /* If the BIOS owns the HC, signal that the OS wants it, and wait */ 818 /* If the BIOS owns the HC, signal that the OS wants it, and wait */
819 if (val & XHCI_HC_BIOS_OWNED) { 819 if (val & XHCI_HC_BIOS_OWNED) {
820 writel(val & XHCI_HC_OS_OWNED, base + ext_cap_offset); 820 writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
821 821
822 /* Wait for 5 seconds with 10 microsecond polling interval */ 822 /* Wait for 5 seconds with 10 microsecond polling interval */
823 timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, 823 timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
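
The one-character pci-quirks.c fix matters: requesting ownership of the xHCI controller means setting the OS-owned bit with OR; the old AND masked the register down to a bit that was not set, so the request never reached the hardware and the BIOS never saw it. A self-contained illustration (bit positions are illustrative, not quoted from xhci-ext-caps.h):

    #include <stdio.h>
    #include <stdint.h>

    #define HC_BIOS_OWNED   (1u << 16)      /* illustrative bit layout */
    #define HC_OS_OWNED     (1u << 24)

    int main(void)
    {
            uint32_t val = HC_BIOS_OWNED;   /* BIOS currently owns the HC */

            /* wrong: AND clears everything except OS_OWNED, which is 0 here,
             * so nothing gets requested */
            printf("val & HC_OS_OWNED = 0x%08x\n", val & HC_OS_OWNED);

            /* right: OR sets the "OS requests ownership" bit, keeps the rest */
            printf("val | HC_OS_OWNED = 0x%08x\n", val | HC_OS_OWNED);
            return 0;
    }
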
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0be788cc2fdb..723f8231193d 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -463,11 +463,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
463 && (temp & PORT_POWER)) 463 && (temp & PORT_POWER))
464 status |= USB_PORT_STAT_SUSPEND; 464 status |= USB_PORT_STAT_SUSPEND;
465 } 465 }
466 if ((temp & PORT_PLS_MASK) == XDEV_RESUME) { 466 if ((temp & PORT_PLS_MASK) == XDEV_RESUME &&
467 !DEV_SUPERSPEED(temp)) {
467 if ((temp & PORT_RESET) || !(temp & PORT_PE)) 468 if ((temp & PORT_RESET) || !(temp & PORT_PE))
468 goto error; 469 goto error;
469 if (!DEV_SUPERSPEED(temp) && time_after_eq(jiffies, 470 if (time_after_eq(jiffies,
470 bus_state->resume_done[wIndex])) { 471 bus_state->resume_done[wIndex])) {
471 xhci_dbg(xhci, "Resume USB2 port %d\n", 472 xhci_dbg(xhci, "Resume USB2 port %d\n",
472 wIndex + 1); 473 wIndex + 1);
473 bus_state->resume_done[wIndex] = 0; 474 bus_state->resume_done[wIndex] = 0;
@@ -487,6 +488,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
487 xhci_ring_device(xhci, slot_id); 488 xhci_ring_device(xhci, slot_id);
488 bus_state->port_c_suspend |= 1 << wIndex; 489 bus_state->port_c_suspend |= 1 << wIndex;
489 bus_state->suspended_ports &= ~(1 << wIndex); 490 bus_state->suspended_ports &= ~(1 << wIndex);
491 } else {
492 /*
493 * The resume has been signaling for less than
494 * 20ms. Report the port status as SUSPEND,
495 * let the usbcore check port status again
496 * and clear resume signaling later.
497 */
498 status |= USB_PORT_STAT_SUSPEND;
490 } 499 }
491 } 500 }
492 if ((temp & PORT_PLS_MASK) == XDEV_U0 501 if ((temp & PORT_PLS_MASK) == XDEV_U0
@@ -664,7 +673,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
664 xhci_dbg(xhci, "PORTSC %04x\n", temp); 673 xhci_dbg(xhci, "PORTSC %04x\n", temp);
665 if (temp & PORT_RESET) 674 if (temp & PORT_RESET)
666 goto error; 675 goto error;
667 if (temp & XDEV_U3) { 676 if ((temp & PORT_PLS_MASK) == XDEV_U3) {
668 if ((temp & PORT_PE) == 0) 677 if ((temp & PORT_PE) == 0)
669 goto error; 678 goto error;
670 679
@@ -752,7 +761,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
752 memset(buf, 0, retval); 761 memset(buf, 0, retval);
753 status = 0; 762 status = 0;
754 763
755 mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC; 764 mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
756 765
757 spin_lock_irqsave(&xhci->lock, flags); 766 spin_lock_irqsave(&xhci->lock, flags);
758 /* For each port, did anything change? If so, set that bit in buf. */ 767 /* For each port, did anything change? If so, set that bit in buf. */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7113d16e2d3a..952e2ded61af 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -514,8 +514,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
514 (unsigned long long) addr); 514 (unsigned long long) addr);
515} 515}
516 516
517/* flip_cycle means flip the cycle bit of all but the first and last TRB.
518 * (The last TRB actually points to the ring enqueue pointer, which is not part
519 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
520 */
517static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 521static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
518 struct xhci_td *cur_td) 522 struct xhci_td *cur_td, bool flip_cycle)
519{ 523{
520 struct xhci_segment *cur_seg; 524 struct xhci_segment *cur_seg;
521 union xhci_trb *cur_trb; 525 union xhci_trb *cur_trb;
@@ -528,6 +532,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
528 * leave the pointers intact. 532 * leave the pointers intact.
529 */ 533 */
530 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN); 534 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
535 /* Flip the cycle bit (link TRBs can't be the first
536 * or last TRB).
537 */
538 if (flip_cycle)
539 cur_trb->generic.field[3] ^=
540 cpu_to_le32(TRB_CYCLE);
531 xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); 541 xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
532 xhci_dbg(xhci, "Address = %p (0x%llx dma); " 542 xhci_dbg(xhci, "Address = %p (0x%llx dma); "
533 "in seg %p (0x%llx dma)\n", 543 "in seg %p (0x%llx dma)\n",
@@ -541,6 +551,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
541 cur_trb->generic.field[2] = 0; 551 cur_trb->generic.field[2] = 0;
542 /* Preserve only the cycle bit of this TRB */ 552 /* Preserve only the cycle bit of this TRB */
543 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); 553 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
554 /* Flip the cycle bit except on the first or last TRB */
555 if (flip_cycle && cur_trb != cur_td->first_trb &&
556 cur_trb != cur_td->last_trb)
557 cur_trb->generic.field[3] ^=
558 cpu_to_le32(TRB_CYCLE);
544 cur_trb->generic.field[3] |= cpu_to_le32( 559 cur_trb->generic.field[3] |= cpu_to_le32(
545 TRB_TYPE(TRB_TR_NOOP)); 560 TRB_TYPE(TRB_TR_NOOP));
546 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " 561 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
@@ -719,14 +734,14 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
719 cur_td->urb->stream_id, 734 cur_td->urb->stream_id,
720 cur_td, &deq_state); 735 cur_td, &deq_state);
721 else 736 else
722 td_to_noop(xhci, ep_ring, cur_td); 737 td_to_noop(xhci, ep_ring, cur_td, false);
723remove_finished_td: 738remove_finished_td:
724 /* 739 /*
725 * The event handler won't see a completion for this TD anymore, 740 * The event handler won't see a completion for this TD anymore,
726 * so remove it from the endpoint ring's TD list. Keep it in 741 * so remove it from the endpoint ring's TD list. Keep it in
727 * the cancelled TD list for URB completion later. 742 * the cancelled TD list for URB completion later.
728 */ 743 */
729 list_del(&cur_td->td_list); 744 list_del_init(&cur_td->td_list);
730 } 745 }
731 last_unlinked_td = cur_td; 746 last_unlinked_td = cur_td;
732 xhci_stop_watchdog_timer_in_irq(xhci, ep); 747 xhci_stop_watchdog_timer_in_irq(xhci, ep);
@@ -754,7 +769,7 @@ remove_finished_td:
754 do { 769 do {
755 cur_td = list_entry(ep->cancelled_td_list.next, 770 cur_td = list_entry(ep->cancelled_td_list.next,
756 struct xhci_td, cancelled_td_list); 771 struct xhci_td, cancelled_td_list);
757 list_del(&cur_td->cancelled_td_list); 772 list_del_init(&cur_td->cancelled_td_list);
758 773
759 /* Clean up the cancelled URB */ 774 /* Clean up the cancelled URB */
760 /* Doesn't matter what we pass for status, since the core will 775 /* Doesn't matter what we pass for status, since the core will
@@ -862,9 +877,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
862 cur_td = list_first_entry(&ring->td_list, 877 cur_td = list_first_entry(&ring->td_list,
863 struct xhci_td, 878 struct xhci_td,
864 td_list); 879 td_list);
865 list_del(&cur_td->td_list); 880 list_del_init(&cur_td->td_list);
866 if (!list_empty(&cur_td->cancelled_td_list)) 881 if (!list_empty(&cur_td->cancelled_td_list))
867 list_del(&cur_td->cancelled_td_list); 882 list_del_init(&cur_td->cancelled_td_list);
868 xhci_giveback_urb_in_irq(xhci, cur_td, 883 xhci_giveback_urb_in_irq(xhci, cur_td,
869 -ESHUTDOWN, "killed"); 884 -ESHUTDOWN, "killed");
870 } 885 }
@@ -873,7 +888,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
873 &temp_ep->cancelled_td_list, 888 &temp_ep->cancelled_td_list,
874 struct xhci_td, 889 struct xhci_td,
875 cancelled_td_list); 890 cancelled_td_list);
876 list_del(&cur_td->cancelled_td_list); 891 list_del_init(&cur_td->cancelled_td_list);
877 xhci_giveback_urb_in_irq(xhci, cur_td, 892 xhci_giveback_urb_in_irq(xhci, cur_td,
878 -ESHUTDOWN, "killed"); 893 -ESHUTDOWN, "killed");
879 } 894 }
@@ -1565,10 +1580,10 @@ td_cleanup:
1565 else 1580 else
1566 *status = 0; 1581 *status = 0;
1567 } 1582 }
1568 list_del(&td->td_list); 1583 list_del_init(&td->td_list);
1569 /* Was this TD slated to be cancelled but completed anyway? */ 1584 /* Was this TD slated to be cancelled but completed anyway? */
1570 if (!list_empty(&td->cancelled_td_list)) 1585 if (!list_empty(&td->cancelled_td_list))
1571 list_del(&td->cancelled_td_list); 1586 list_del_init(&td->cancelled_td_list);
1572 1587
1573 urb_priv->td_cnt++; 1588 urb_priv->td_cnt++;
1574 /* Giveback the urb when all the tds are completed */ 1589 /* Giveback the urb when all the tds are completed */
@@ -1919,8 +1934,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1919 int status = -EINPROGRESS; 1934 int status = -EINPROGRESS;
1920 struct urb_priv *urb_priv; 1935 struct urb_priv *urb_priv;
1921 struct xhci_ep_ctx *ep_ctx; 1936 struct xhci_ep_ctx *ep_ctx;
1937 struct list_head *tmp;
1922 u32 trb_comp_code; 1938 u32 trb_comp_code;
1923 int ret = 0; 1939 int ret = 0;
1940 int td_num = 0;
1924 1941
1925 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1942 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1926 xdev = xhci->devs[slot_id]; 1943 xdev = xhci->devs[slot_id];
@@ -1942,6 +1959,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1942 return -ENODEV; 1959 return -ENODEV;
1943 } 1960 }
1944 1961
1962 /* Count current td numbers if ep->skip is set */
1963 if (ep->skip) {
1964 list_for_each(tmp, &ep_ring->td_list)
1965 td_num++;
1966 }
1967
1945 event_dma = le64_to_cpu(event->buffer); 1968 event_dma = le64_to_cpu(event->buffer);
1946 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); 1969 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1947 /* Look for common error cases */ 1970 /* Look for common error cases */
@@ -2053,7 +2076,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2053 goto cleanup; 2076 goto cleanup;
2054 } 2077 }
2055 2078
2079 /* We've skipped all the TDs on the ep ring when ep->skip set */
2080 if (ep->skip && td_num == 0) {
2081 ep->skip = false;
2082 xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2083 "Clear skip flag.\n");
2084 ret = 0;
2085 goto cleanup;
2086 }
2087
2056 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 2088 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2089 if (ep->skip)
2090 td_num--;
2057 2091
2058 /* Is this a TRB in the currently executing TD? */ 2092 /* Is this a TRB in the currently executing TD? */
2059 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, 2093 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
@@ -2500,11 +2534,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
2500 2534
2501 if (td_index == 0) { 2535 if (td_index == 0) {
2502 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); 2536 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
2503 if (unlikely(ret)) { 2537 if (unlikely(ret))
2504 xhci_urb_free_priv(xhci, urb_priv);
2505 urb->hcpriv = NULL;
2506 return ret; 2538 return ret;
2507 }
2508 } 2539 }
2509 2540
2510 td->urb = urb; 2541 td->urb = urb;
@@ -2672,6 +2703,10 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
2672{ 2703{
2673 int packets_transferred; 2704 int packets_transferred;
2674 2705
2706 /* One TRB with a zero-length data packet. */
2707 if (running_total == 0 && trb_buff_len == 0)
2708 return 0;
2709
2675 /* All the TRB queueing functions don't count the current TRB in 2710 /* All the TRB queueing functions don't count the current TRB in
2676 * running_total. 2711 * running_total.
2677 */ 2712 */
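The new early return in xhci_v1_0_td_remainder() handles a TRB that carries a zero-length packet: the xHCI 1.0 TD-size field reports how many packets remain after the current TRB, and for a zero-length transfer nothing remains, so the rounding math below must not run. A condensed userspace model (maxp and the sample values are illustrative; the real function takes them from the endpoint descriptor and the result feeds a 5-bit TD-size field):

#include <stdio.h>

static unsigned int td_remainder(unsigned int running_total,
                                 unsigned int trb_buff_len,
                                 unsigned int total_packet_count,
                                 unsigned int maxp)
{
        unsigned int packets_transferred;

        /* One TRB with a zero-length data packet: nothing is left to send. */
        if (running_total == 0 && trb_buff_len == 0)
                return 0;

        /* The queueing code does not count the current TRB in running_total. */
        packets_transferred = (running_total + trb_buff_len) / maxp;
        return total_packet_count - packets_transferred;
}

int main(void)
{
        /* Zero-length TD: without the guard this would claim one packet is
         * still pending (1 - 0/512), even though the TD is already complete. */
        printf("%u\n", td_remainder(0, 0, 1, 512));    /* 0 */

        /* 1024-byte TD split over two 512-byte packets, first TRB of 512. */
        printf("%u\n", td_remainder(0, 512, 2, 512));  /* 1 */
        return 0;
}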
@@ -3113,20 +3148,15 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3113 struct urb *urb, int i) 3148 struct urb *urb, int i)
3114{ 3149{
3115 int num_trbs = 0; 3150 int num_trbs = 0;
3116 u64 addr, td_len, running_total; 3151 u64 addr, td_len;
3117 3152
3118 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); 3153 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3119 td_len = urb->iso_frame_desc[i].length; 3154 td_len = urb->iso_frame_desc[i].length;
3120 3155
3121 running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); 3156 num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3122 running_total &= TRB_MAX_BUFF_SIZE - 1; 3157 TRB_MAX_BUFF_SIZE);
3123 if (running_total != 0) 3158 if (num_trbs == 0)
3124 num_trbs++;
3125
3126 while (running_total < td_len) {
3127 num_trbs++; 3159 num_trbs++;
3128 running_total += TRB_MAX_BUFF_SIZE;
3129 }
3130 3160
3131 return num_trbs; 3161 return num_trbs;
3132} 3162}
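count_isoc_trbs_needed() now computes the TRB count in closed form: the TD length plus the offset into the first TRB-sized chunk, rounded up to whole buffers, with at least one TRB reserved even for a zero-length descriptor. A standalone check that the old loop and the new expression agree for every non-empty TD, and that only the zero-length case changes (TRB_MAX_BUFF_SIZE is assumed to be 64 KiB, matching the driver's header of this period):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TRB_MAX_BUFF_SIZE (1U << 16)                 /* assumed: 64 KiB per TRB */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Loop-based count removed by the hunk above. */
static int count_old(uint64_t addr, uint64_t td_len)
{
        int num_trbs = 0;
        uint64_t running_total;

        running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
        running_total &= TRB_MAX_BUFF_SIZE - 1;
        if (running_total != 0)
                num_trbs++;
        while (running_total < td_len) {
                num_trbs++;
                running_total += TRB_MAX_BUFF_SIZE;
        }
        return num_trbs;
}

/* Closed-form count added by the hunk above. */
static int count_new(uint64_t addr, uint64_t td_len)
{
        int num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
                                    TRB_MAX_BUFF_SIZE);

        if (num_trbs == 0)
                num_trbs++;
        return num_trbs;
}

int main(void)
{
        uint64_t addr, len;

        /* Identical for every non-empty TD, whatever the buffer alignment... */
        for (addr = 0; addr < 3 * TRB_MAX_BUFF_SIZE; addr += 4099)
                for (len = 1; len < 3 * TRB_MAX_BUFF_SIZE; len += 8191)
                        assert(count_old(addr, len) == count_new(addr, len));

        /* ...while the new form also reserves one TRB for a zero-length TD,
         * which the old loop missed when the buffer sat on a TRB boundary. */
        printf("len=0, aligned: old=%d new=%d\n", count_old(0, 0), count_new(0, 0));
        return 0;
}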
@@ -3226,6 +3256,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3226 start_trb = &ep_ring->enqueue->generic; 3256 start_trb = &ep_ring->enqueue->generic;
3227 start_cycle = ep_ring->cycle_state; 3257 start_cycle = ep_ring->cycle_state;
3228 3258
3259 urb_priv = urb->hcpriv;
3229 /* Queue the first TRB, even if it's zero-length */ 3260 /* Queue the first TRB, even if it's zero-length */
3230 for (i = 0; i < num_tds; i++) { 3261 for (i = 0; i < num_tds; i++) {
3231 unsigned int total_packet_count; 3262 unsigned int total_packet_count;
@@ -3237,9 +3268,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3237 addr = start_addr + urb->iso_frame_desc[i].offset; 3268 addr = start_addr + urb->iso_frame_desc[i].offset;
3238 td_len = urb->iso_frame_desc[i].length; 3269 td_len = urb->iso_frame_desc[i].length;
3239 td_remain_len = td_len; 3270 td_remain_len = td_len;
3240 /* FIXME: Ignoring zero-length packets, can those happen? */
3241 total_packet_count = roundup(td_len, 3271 total_packet_count = roundup(td_len,
3242 le16_to_cpu(urb->ep->desc.wMaxPacketSize)); 3272 le16_to_cpu(urb->ep->desc.wMaxPacketSize));
3273 /* A zero-length transfer still involves at least one packet. */
3274 if (total_packet_count == 0)
3275 total_packet_count++;
3243 burst_count = xhci_get_burst_count(xhci, urb->dev, urb, 3276 burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
3244 total_packet_count); 3277 total_packet_count);
3245 residue = xhci_get_last_burst_packet_count(xhci, 3278 residue = xhci_get_last_burst_packet_count(xhci,
@@ -3249,12 +3282,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3249 3282
3250 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, 3283 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3251 urb->stream_id, trbs_per_td, urb, i, mem_flags); 3284 urb->stream_id, trbs_per_td, urb, i, mem_flags);
3252 if (ret < 0) 3285 if (ret < 0) {
3253 return ret; 3286 if (i == 0)
3287 return ret;
3288 goto cleanup;
3289 }
3254 3290
3255 urb_priv = urb->hcpriv;
3256 td = urb_priv->td[i]; 3291 td = urb_priv->td[i];
3257
3258 for (j = 0; j < trbs_per_td; j++) { 3292 for (j = 0; j < trbs_per_td; j++) {
3259 u32 remainder = 0; 3293 u32 remainder = 0;
3260 field = TRB_TBC(burst_count) | TRB_TLBPC(residue); 3294 field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
@@ -3344,6 +3378,27 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3344 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3378 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3345 start_cycle, start_trb); 3379 start_cycle, start_trb);
3346 return 0; 3380 return 0;
3381cleanup:
3382 /* Clean up a partially enqueued isoc transfer. */
3383
3384 for (i--; i >= 0; i--)
3385 list_del_init(&urb_priv->td[i]->td_list);
3386
3387 /* Use the first TD as a temporary variable to turn the TDs we've queued
3388 * into No-ops with a software-owned cycle bit. That way the hardware
3389 * won't accidentally start executing bogus TDs when we partially
3390 * overwrite them. td->first_trb and td->start_seg are already set.
3391 */
3392 urb_priv->td[0]->last_trb = ep_ring->enqueue;
3393 /* Every TRB except the first & last will have its cycle bit flipped. */
3394 td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3395
3396 /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3397 ep_ring->enqueue = urb_priv->td[0]->first_trb;
3398 ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3399 ep_ring->cycle_state = start_cycle;
3400 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3401 return ret;
3347} 3402}
3348 3403
3349/* 3404/*
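The new cleanup path unwinds a partially queued isochronous URB when prepare_transfer() fails midway: the TDs already on the ring are unlinked, the written TRBs are turned into no-ops with a software-owned cycle bit via td_to_noop(), and the ring's enqueue pointer, segment, and cycle state are rewound to the snapshot taken before the first TD. A much-simplified userspace model of that snapshot-and-restore idea (a flat array stands in for the segmented ring, and the cycle-bit / no-op details are omitted):

#include <stdio.h>

#define RING_SLOTS 8

struct ring {
        int slots[RING_SLOTS];
        unsigned int enq;       /* enqueue index: stands in for enqueue/enq_seg */
        unsigned int cycle;     /* stands in for cycle_state */
};

static int enqueue_td(struct ring *r, int val)
{
        if (r->enq >= RING_SLOTS)
                return -1;                      /* "ring full" */
        r->slots[r->enq++] = val;
        return 0;
}

static int queue_isoc(struct ring *r, const int *tds, int n)
{
        struct ring saved = *r;                 /* start_trb / start_cycle snapshot */
        int i;

        for (i = 0; i < n; i++) {
                if (enqueue_td(r, tds[i]) < 0) {
                        *r = saved;             /* roll the whole transfer back */
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        struct ring r = { { 0 }, 6, 1 };
        int tds[4] = { 1, 2, 3, 4 };

        if (queue_isoc(&r, tds, 4) < 0)
                printf("partial enqueue rolled back, enq=%u\n", r.enq);  /* 6 */
        return 0;
}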
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 763f484bc092..3a0f695138f4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -345,7 +345,8 @@ static void xhci_event_ring_work(unsigned long arg)
345 spin_lock_irqsave(&xhci->lock, flags); 345 spin_lock_irqsave(&xhci->lock, flags);
346 temp = xhci_readl(xhci, &xhci->op_regs->status); 346 temp = xhci_readl(xhci, &xhci->op_regs->status);
347 xhci_dbg(xhci, "op reg status = 0x%x\n", temp); 347 xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
348 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { 348 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
349 (xhci->xhc_state & XHCI_STATE_HALTED)) {
349 xhci_dbg(xhci, "HW died, polling stopped.\n"); 350 xhci_dbg(xhci, "HW died, polling stopped.\n");
350 spin_unlock_irqrestore(&xhci->lock, flags); 351 spin_unlock_irqrestore(&xhci->lock, flags);
351 return; 352 return;
@@ -939,8 +940,11 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
939 return 0; 940 return 0;
940 } 941 }
941 942
943 xhci = hcd_to_xhci(hcd);
944 if (xhci->xhc_state & XHCI_STATE_HALTED)
945 return -ENODEV;
946
942 if (check_virt_dev) { 947 if (check_virt_dev) {
943 xhci = hcd_to_xhci(hcd);
944 if (!udev->slot_id || !xhci->devs 948 if (!udev->slot_id || !xhci->devs
945 || !xhci->devs[udev->slot_id]) { 949 || !xhci->devs[udev->slot_id]) {
946 printk(KERN_DEBUG "xHCI %s called with unaddressed " 950 printk(KERN_DEBUG "xHCI %s called with unaddressed "
@@ -1081,8 +1085,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1081 if (urb->dev->speed == USB_SPEED_FULL) { 1085 if (urb->dev->speed == USB_SPEED_FULL) {
1082 ret = xhci_check_maxpacket(xhci, slot_id, 1086 ret = xhci_check_maxpacket(xhci, slot_id,
1083 ep_index, urb); 1087 ep_index, urb);
1084 if (ret < 0) 1088 if (ret < 0) {
1089 xhci_urb_free_priv(xhci, urb_priv);
1090 urb->hcpriv = NULL;
1085 return ret; 1091 return ret;
1092 }
1086 } 1093 }
1087 1094
1088 /* We have a spinlock and interrupts disabled, so we must pass 1095 /* We have a spinlock and interrupts disabled, so we must pass
@@ -1093,6 +1100,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1093 goto dying; 1100 goto dying;
1094 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, 1101 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1095 slot_id, ep_index); 1102 slot_id, ep_index);
1103 if (ret)
1104 goto free_priv;
1096 spin_unlock_irqrestore(&xhci->lock, flags); 1105 spin_unlock_irqrestore(&xhci->lock, flags);
1097 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { 1106 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
1098 spin_lock_irqsave(&xhci->lock, flags); 1107 spin_lock_irqsave(&xhci->lock, flags);
@@ -1113,6 +1122,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1113 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, 1122 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1114 slot_id, ep_index); 1123 slot_id, ep_index);
1115 } 1124 }
1125 if (ret)
1126 goto free_priv;
1116 spin_unlock_irqrestore(&xhci->lock, flags); 1127 spin_unlock_irqrestore(&xhci->lock, flags);
1117 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { 1128 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
1118 spin_lock_irqsave(&xhci->lock, flags); 1129 spin_lock_irqsave(&xhci->lock, flags);
@@ -1120,6 +1131,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1120 goto dying; 1131 goto dying;
1121 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, 1132 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1122 slot_id, ep_index); 1133 slot_id, ep_index);
1134 if (ret)
1135 goto free_priv;
1123 spin_unlock_irqrestore(&xhci->lock, flags); 1136 spin_unlock_irqrestore(&xhci->lock, flags);
1124 } else { 1137 } else {
1125 spin_lock_irqsave(&xhci->lock, flags); 1138 spin_lock_irqsave(&xhci->lock, flags);
@@ -1127,18 +1140,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1127 goto dying; 1140 goto dying;
1128 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, 1141 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1129 slot_id, ep_index); 1142 slot_id, ep_index);
1143 if (ret)
1144 goto free_priv;
1130 spin_unlock_irqrestore(&xhci->lock, flags); 1145 spin_unlock_irqrestore(&xhci->lock, flags);
1131 } 1146 }
1132exit: 1147exit:
1133 return ret; 1148 return ret;
1134dying: 1149dying:
1135 xhci_urb_free_priv(xhci, urb_priv);
1136 urb->hcpriv = NULL;
1137 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " 1150 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
1138 "non-responsive xHCI host.\n", 1151 "non-responsive xHCI host.\n",
1139 urb->ep->desc.bEndpointAddress, urb); 1152 urb->ep->desc.bEndpointAddress, urb);
1153 ret = -ESHUTDOWN;
1154free_priv:
1155 xhci_urb_free_priv(xhci, urb_priv);
1156 urb->hcpriv = NULL;
1140 spin_unlock_irqrestore(&xhci->lock, flags); 1157 spin_unlock_irqrestore(&xhci->lock, flags);
1141 return -ESHUTDOWN; 1158 return ret;
1142} 1159}
1143 1160
1144/* Get the right ring for the given URB. 1161/* Get the right ring for the given URB.
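Together with the earlier prepare_transfer() hunk, xhci_urb_enqueue() now owns the urb_priv cleanup on every failure path: the dying label just selects -ESHUTDOWN and falls through to free_priv, and each failed xhci_queue_*_tx() call jumps to the same label. A minimal sketch of that unwind ladder with hypothetical helpers (queue_tx() and the malloc'd buffer merely play the roles of the queueing call and urb->hcpriv):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int queue_tx(int fail)
{
        return fail ? -ENOMEM : 0;      /* stand-in for xhci_queue_*_tx() */
}

static int enqueue(int host_dying, int queue_fails)
{
        int ret;
        void *priv = malloc(32);        /* plays the role of urb->hcpriv */

        if (!priv)
                return -ENOMEM;
        if (host_dying)
                goto dying;

        ret = queue_tx(queue_fails);
        if (ret)
                goto free_priv;         /* queueing failed: drop our state */
        return 0;                       /* success: priv stays attached until
                                         * giveback (not freed in this toy) */
dying:
        ret = -ESHUTDOWN;               /* one place to pick the error code */
free_priv:
        free(priv);                     /* one place to release urb_priv */
        return ret;
}

int main(void)
{
        printf("%d %d %d\n", enqueue(0, 0), enqueue(1, 0), enqueue(0, 1));
        return 0;
}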
@@ -1235,6 +1252,13 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1235 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { 1252 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1236 xhci_dbg(xhci, "HW died, freeing TD.\n"); 1253 xhci_dbg(xhci, "HW died, freeing TD.\n");
1237 urb_priv = urb->hcpriv; 1254 urb_priv = urb->hcpriv;
1255 for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
1256 td = urb_priv->td[i];
1257 if (!list_empty(&td->td_list))
1258 list_del_init(&td->td_list);
1259 if (!list_empty(&td->cancelled_td_list))
1260 list_del_init(&td->cancelled_td_list);
1261 }
1238 1262
1239 usb_hcd_unlink_urb_from_ep(hcd, urb); 1263 usb_hcd_unlink_urb_from_ep(hcd, urb);
1240 spin_unlock_irqrestore(&xhci->lock, flags); 1264 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1242,7 +1266,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1242 xhci_urb_free_priv(xhci, urb_priv); 1266 xhci_urb_free_priv(xhci, urb_priv);
1243 return ret; 1267 return ret;
1244 } 1268 }
1245 if (xhci->xhc_state & XHCI_STATE_DYING) { 1269 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
1270 (xhci->xhc_state & XHCI_STATE_HALTED)) {
1246 xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " 1271 xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
1247 "non-responsive xHCI host.\n", 1272 "non-responsive xHCI host.\n",
1248 urb->ep->desc.bEndpointAddress, urb); 1273 urb->ep->desc.bEndpointAddress, urb);
@@ -2665,7 +2690,10 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
2665 int i, ret; 2690 int i, ret;
2666 2691
2667 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2692 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2668 if (ret <= 0) 2693 /* If the host is halted due to driver unload, we still need to free the
2694 * device.
2695 */
2696 if (ret <= 0 && ret != -ENODEV)
2669 return; 2697 return;
2670 2698
2671 virt_dev = xhci->devs[udev->slot_id]; 2699 virt_dev = xhci->devs[udev->slot_id];
@@ -2679,7 +2707,8 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
2679 spin_lock_irqsave(&xhci->lock, flags); 2707 spin_lock_irqsave(&xhci->lock, flags);
2680 /* Don't disable the slot if the host controller is dead. */ 2708 /* Don't disable the slot if the host controller is dead. */
2681 state = xhci_readl(xhci, &xhci->op_regs->status); 2709 state = xhci_readl(xhci, &xhci->op_regs->status);
2682 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { 2710 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
2711 (xhci->xhc_state & XHCI_STATE_HALTED)) {
2683 xhci_free_virt_device(xhci, udev->slot_id); 2712 xhci_free_virt_device(xhci, udev->slot_id);
2684 spin_unlock_irqrestore(&xhci->lock, flags); 2713 spin_unlock_irqrestore(&xhci->lock, flags);
2685 return; 2714 return;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 6192b45959f4..fc34b8b11910 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -3,9 +3,6 @@
3# for silicon based on Mentor Graphics INVENTRA designs 3# for silicon based on Mentor Graphics INVENTRA designs
4# 4#
5 5
6comment "Enable Host or Gadget support to see Inventra options"
7 depends on !USB && USB_GADGET=n
8
9# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller 6# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
10config USB_MUSB_HDRC 7config USB_MUSB_HDRC
11 depends on USB && USB_GADGET 8 depends on USB && USB_GADGET
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index ae8c39617743..5e7cfba5b079 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -17,6 +17,7 @@
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20#include <linux/prefetch.h>
20 21
21#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
22 23
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 149f3f310a0a..318fb4e8a885 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -226,8 +226,10 @@ static int cppi_controller_stop(struct dma_controller *c)
226 struct cppi *controller; 226 struct cppi *controller;
227 void __iomem *tibase; 227 void __iomem *tibase;
228 int i; 228 int i;
229 struct musb *musb;
229 230
230 controller = container_of(c, struct cppi, controller); 231 controller = container_of(c, struct cppi, controller);
232 musb = controller->musb;
231 233
232 tibase = controller->tibase; 234 tibase = controller->tibase;
233 /* DISABLE INDIVIDUAL CHANNEL Interrupts */ 235 /* DISABLE INDIVIDUAL CHANNEL Interrupts */
@@ -289,9 +291,11 @@ cppi_channel_allocate(struct dma_controller *c,
289 u8 index; 291 u8 index;
290 struct cppi_channel *cppi_ch; 292 struct cppi_channel *cppi_ch;
291 void __iomem *tibase; 293 void __iomem *tibase;
294 struct musb *musb;
292 295
293 controller = container_of(c, struct cppi, controller); 296 controller = container_of(c, struct cppi, controller);
294 tibase = controller->tibase; 297 tibase = controller->tibase;
298 musb = controller->musb;
295 299
296 /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */ 300 /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
297 index = ep->epnum - 1; 301 index = ep->epnum - 1;
@@ -339,7 +343,8 @@ static void cppi_channel_release(struct dma_channel *channel)
339 c = container_of(channel, struct cppi_channel, channel); 343 c = container_of(channel, struct cppi_channel, channel);
340 tibase = c->controller->tibase; 344 tibase = c->controller->tibase;
341 if (!c->hw_ep) 345 if (!c->hw_ep)
342 dev_dbg(musb->controller, "releasing idle DMA channel %p\n", c); 346 dev_dbg(c->controller->musb->controller,
347 "releasing idle DMA channel %p\n", c);
343 else if (!c->transmit) 348 else if (!c->transmit)
344 core_rxirq_enable(tibase, c->index + 1); 349 core_rxirq_enable(tibase, c->index + 1);
345 350
@@ -357,10 +362,11 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
357 362
358 musb_ep_select(base, c->index + 1); 363 musb_ep_select(base, c->index + 1);
359 364
360 DBG(level, "RX DMA%d%s: %d left, csr %04x, " 365 dev_dbg(c->controller->musb->controller,
361 "%08x H%08x S%08x C%08x, " 366 "RX DMA%d%s: %d left, csr %04x, "
362 "B%08x L%08x %08x .. %08x" 367 "%08x H%08x S%08x C%08x, "
363 "\n", 368 "B%08x L%08x %08x .. %08x"
369 "\n",
364 c->index, tag, 370 c->index, tag,
365 musb_readl(c->controller->tibase, 371 musb_readl(c->controller->tibase,
366 DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), 372 DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
@@ -387,10 +393,11 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
387 393
388 musb_ep_select(base, c->index + 1); 394 musb_ep_select(base, c->index + 1);
389 395
390 DBG(level, "TX DMA%d%s: csr %04x, " 396 dev_dbg(c->controller->musb->controller,
391 "H%08x S%08x C%08x %08x, " 397 "TX DMA%d%s: csr %04x, "
392 "F%08x L%08x .. %08x" 398 "H%08x S%08x C%08x %08x, "
393 "\n", 399 "F%08x L%08x .. %08x"
400 "\n",
394 c->index, tag, 401 c->index, tag,
395 musb_readw(c->hw_ep->regs, MUSB_TXCSR), 402 musb_readw(c->hw_ep->regs, MUSB_TXCSR),
396 403
@@ -1022,6 +1029,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
1022 int i; 1029 int i;
1023 dma_addr_t safe2ack; 1030 dma_addr_t safe2ack;
1024 void __iomem *regs = rx->hw_ep->regs; 1031 void __iomem *regs = rx->hw_ep->regs;
1032 struct musb *musb = cppi->musb;
1025 1033
1026 cppi_dump_rx(6, rx, "/K"); 1034 cppi_dump_rx(6, rx, "/K");
1027 1035
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 8bdf25a8b023..f9a3f62a83b5 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -35,7 +35,7 @@
35 35
36#include <mach/hardware.h> 36#include <mach/hardware.h>
37#include <mach/memory.h> 37#include <mach/memory.h>
38#include <mach/gpio.h> 38#include <asm/gpio.h>
39#include <mach/cputype.h> 39#include <mach/cputype.h>
40 40
41#include <asm/mach-types.h> 41#include <asm/mach-types.h>
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 668eeef601ae..b3c065ab9dbc 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -172,7 +172,8 @@ enum musb_g_ep0_state {
172#endif 172#endif
173 173
174/* TUSB mapping: "flat" plus ep0 special cases */ 174/* TUSB mapping: "flat" plus ep0 special cases */
175#if defined(CONFIG_USB_MUSB_TUSB6010) 175#if defined(CONFIG_USB_MUSB_TUSB6010) || \
176 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
176#define musb_ep_select(_mbase, _epnum) \ 177#define musb_ep_select(_mbase, _epnum) \
177 musb_writeb((_mbase), MUSB_INDEX, (_epnum)) 178 musb_writeb((_mbase), MUSB_INDEX, (_epnum))
178#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET 179#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET
@@ -241,7 +242,8 @@ struct musb_hw_ep {
241 void __iomem *fifo; 242 void __iomem *fifo;
242 void __iomem *regs; 243 void __iomem *regs;
243 244
244#ifdef CONFIG_USB_MUSB_TUSB6010 245#if defined(CONFIG_USB_MUSB_TUSB6010) || \
246 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
245 void __iomem *conf; 247 void __iomem *conf;
246#endif 248#endif
247 249
@@ -258,7 +260,8 @@ struct musb_hw_ep {
258 struct dma_channel *tx_channel; 260 struct dma_channel *tx_channel;
259 struct dma_channel *rx_channel; 261 struct dma_channel *rx_channel;
260 262
261#ifdef CONFIG_USB_MUSB_TUSB6010 263#if defined(CONFIG_USB_MUSB_TUSB6010) || \
264 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
262 /* TUSB has "asynchronous" and "synchronous" dma modes */ 265 /* TUSB has "asynchronous" and "synchronous" dma modes */
263 dma_addr_t fifo_async; 266 dma_addr_t fifo_async;
264 dma_addr_t fifo_sync; 267 dma_addr_t fifo_sync;
@@ -356,7 +359,8 @@ struct musb {
356 void __iomem *ctrl_base; 359 void __iomem *ctrl_base;
357 void __iomem *mregs; 360 void __iomem *mregs;
358 361
359#ifdef CONFIG_USB_MUSB_TUSB6010 362#if defined(CONFIG_USB_MUSB_TUSB6010) || \
363 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
360 dma_addr_t async; 364 dma_addr_t async;
361 dma_addr_t sync; 365 dma_addr_t sync;
362 void __iomem *sync_va; 366 void __iomem *sync_va;
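The repeated #if changes in the musb headers widen each TUSB6010 guard so the code also builds when the glue is selected as a module, where Kconfig defines CONFIG_USB_MUSB_TUSB6010_MODULE instead of CONFIG_USB_MUSB_TUSB6010. A preprocessor sketch of the pattern; later kernels can write the same two-way test as IS_ENABLED(CONFIG_USB_MUSB_TUSB6010) from <linux/kconfig.h>, assuming that helper is present in the tree being patched:

/* Kconfig: '=y' defines CONFIG_USB_MUSB_TUSB6010,
 *          '=m' defines CONFIG_USB_MUSB_TUSB6010_MODULE instead. */
#if defined(CONFIG_USB_MUSB_TUSB6010) || \
        defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
#define HAVE_TUSB6010 1                 /* TUSB-specific fields/offsets compiled in */
#else
#define HAVE_TUSB6010 0
#endif

/* Equivalent shorthand in later kernels (if <linux/kconfig.h> is available):
 *
 *   #if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
 */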
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index b67a062f556b..e81820370d6f 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1698,6 +1698,8 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1698 1698
1699 is_on = !!is_on; 1699 is_on = !!is_on;
1700 1700
1701 pm_runtime_get_sync(musb->controller);
1702
1701 /* NOTE: this assumes we are sensing vbus; we'd rather 1703 /* NOTE: this assumes we are sensing vbus; we'd rather
1702 * not pullup unless the B-session is active. 1704 * not pullup unless the B-session is active.
1703 */ 1705 */
@@ -1707,6 +1709,9 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1707 musb_pullup(musb, is_on); 1709 musb_pullup(musb, is_on);
1708 } 1710 }
1709 spin_unlock_irqrestore(&musb->lock, flags); 1711 spin_unlock_irqrestore(&musb->lock, flags);
1712
1713 pm_runtime_put(musb->controller);
1714
1710 return 0; 1715 return 0;
1711} 1716}
1712 1717
@@ -1851,6 +1856,7 @@ int __init musb_gadget_setup(struct musb *musb)
1851 1856
1852 return 0; 1857 return 0;
1853err: 1858err:
1859 musb->g.dev.parent = NULL;
1854 device_unregister(&musb->g.dev); 1860 device_unregister(&musb->g.dev);
1855 return status; 1861 return status;
1856} 1862}
@@ -1858,7 +1864,8 @@ err:
1858void musb_gadget_cleanup(struct musb *musb) 1864void musb_gadget_cleanup(struct musb *musb)
1859{ 1865{
1860 usb_del_gadget_udc(&musb->g); 1866 usb_del_gadget_udc(&musb->g);
1861 device_unregister(&musb->g.dev); 1867 if (musb->g.dev.parent)
1868 device_unregister(&musb->g.dev);
1862} 1869}
1863 1870
1864/* 1871/*
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 82410703dcd3..03f2655af290 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -234,7 +234,8 @@
234#define MUSB_TESTMODE 0x0F /* 8 bit */ 234#define MUSB_TESTMODE 0x0F /* 8 bit */
235 235
236/* Get offset for a given FIFO from musb->mregs */ 236/* Get offset for a given FIFO from musb->mregs */
237#ifdef CONFIG_USB_MUSB_TUSB6010 237#if defined(CONFIG_USB_MUSB_TUSB6010) || \
238 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
238#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) 239#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
239#else 240#else
240#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) 241#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
@@ -295,7 +296,8 @@
295#define MUSB_FLAT_OFFSET(_epnum, _offset) \ 296#define MUSB_FLAT_OFFSET(_epnum, _offset) \
296 (0x100 + (0x10*(_epnum)) + (_offset)) 297 (0x100 + (0x10*(_epnum)) + (_offset))
297 298
298#ifdef CONFIG_USB_MUSB_TUSB6010 299#if defined(CONFIG_USB_MUSB_TUSB6010) || \
300 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
299/* TUSB6010 EP0 configuration register is special */ 301/* TUSB6010 EP0 configuration register is special */
300#define MUSB_TUSB_OFFSET(_epnum, _offset) \ 302#define MUSB_TUSB_OFFSET(_epnum, _offset) \
301 (0x10 + _offset) 303 (0x10 + _offset)
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 9eec41fbf3a4..ec1480191f78 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -18,6 +18,7 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/prefetch.h>
21#include <linux/usb.h> 22#include <linux/usb.h>
22#include <linux/irq.h> 23#include <linux/irq.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index c784e6c03aac..b67b4bc596c1 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -20,6 +20,7 @@
20#include <plat/mux.h> 20#include <plat/mux.h>
21 21
22#include "musb_core.h" 22#include "musb_core.h"
23#include "tusb6010.h"
23 24
24#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data) 25#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data)
25 26
@@ -89,7 +90,7 @@ static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
89 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); 90 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
90 91
91 if (reg != 0) { 92 if (reg != 0) {
92 dev_dbg(musb->controller, "ep%i dmareq0 is busy for ep%i\n", 93 dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n",
93 chdat->epnum, reg & 0xf); 94 chdat->epnum, reg & 0xf);
94 return -EAGAIN; 95 return -EAGAIN;
95 } 96 }
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index cecace411832..ef4333f4bbe0 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -65,7 +65,8 @@ static void ux500_tx_work(struct work_struct *data)
65 struct musb *musb = hw_ep->musb; 65 struct musb *musb = hw_ep->musb;
66 unsigned long flags; 66 unsigned long flags;
67 67
68 DBG(4, "DMA tx transfer done on hw_ep=%d\n", hw_ep->epnum); 68 dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n",
69 hw_ep->epnum);
69 70
70 spin_lock_irqsave(&musb->lock, flags); 71 spin_lock_irqsave(&musb->lock, flags);
71 ux500_channel->channel.actual_len = ux500_channel->cur_len; 72 ux500_channel->channel.actual_len = ux500_channel->cur_len;
@@ -84,7 +85,8 @@ static void ux500_rx_work(struct work_struct *data)
84 struct musb *musb = hw_ep->musb; 85 struct musb *musb = hw_ep->musb;
85 unsigned long flags; 86 unsigned long flags;
86 87
87 DBG(4, "DMA rx transfer done on hw_ep=%d\n", hw_ep->epnum); 88 dev_dbg(musb->controller, "DMA rx transfer done on hw_ep=%d\n",
89 hw_ep->epnum);
88 90
89 spin_lock_irqsave(&musb->lock, flags); 91 spin_lock_irqsave(&musb->lock, flags);
90 ux500_channel->channel.actual_len = ux500_channel->cur_len; 92 ux500_channel->channel.actual_len = ux500_channel->cur_len;
@@ -116,9 +118,11 @@ static bool ux500_configure_channel(struct dma_channel *channel,
116 enum dma_slave_buswidth addr_width; 118 enum dma_slave_buswidth addr_width;
117 dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) + 119 dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) +
118 ux500_channel->controller->phy_base); 120 ux500_channel->controller->phy_base);
121 struct musb *musb = ux500_channel->controller->private_data;
119 122
120 DBG(4, "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n", 123 dev_dbg(musb->controller,
121 packet_sz, mode, dma_addr, len, ux500_channel->is_tx); 124 "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n",
125 packet_sz, mode, dma_addr, len, ux500_channel->is_tx);
122 126
123 ux500_channel->cur_len = len; 127 ux500_channel->cur_len = len;
124 128
@@ -133,15 +137,13 @@ static bool ux500_configure_channel(struct dma_channel *channel,
133 DMA_SLAVE_BUSWIDTH_4_BYTES; 137 DMA_SLAVE_BUSWIDTH_4_BYTES;
134 138
135 slave_conf.direction = direction; 139 slave_conf.direction = direction;
136 if (direction == DMA_FROM_DEVICE) { 140 slave_conf.src_addr = usb_fifo_addr;
137 slave_conf.src_addr = usb_fifo_addr; 141 slave_conf.src_addr_width = addr_width;
138 slave_conf.src_addr_width = addr_width; 142 slave_conf.src_maxburst = 16;
139 slave_conf.src_maxburst = 16; 143 slave_conf.dst_addr = usb_fifo_addr;
140 } else { 144 slave_conf.dst_addr_width = addr_width;
141 slave_conf.dst_addr = usb_fifo_addr; 145 slave_conf.dst_maxburst = 16;
142 slave_conf.dst_addr_width = addr_width; 146
143 slave_conf.dst_maxburst = 16;
144 }
145 dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG, 147 dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG,
146 (unsigned long) &slave_conf); 148 (unsigned long) &slave_conf);
147 149
@@ -166,6 +168,7 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
166 struct ux500_dma_controller *controller = container_of(c, 168 struct ux500_dma_controller *controller = container_of(c,
167 struct ux500_dma_controller, controller); 169 struct ux500_dma_controller, controller);
168 struct ux500_dma_channel *ux500_channel = NULL; 170 struct ux500_dma_channel *ux500_channel = NULL;
171 struct musb *musb = controller->private_data;
169 u8 ch_num = hw_ep->epnum - 1; 172 u8 ch_num = hw_ep->epnum - 1;
170 u32 max_ch; 173 u32 max_ch;
171 174
@@ -192,7 +195,7 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
192 ux500_channel->hw_ep = hw_ep; 195 ux500_channel->hw_ep = hw_ep;
193 ux500_channel->is_allocated = 1; 196 ux500_channel->is_allocated = 1;
194 197
195 DBG(7, "hw_ep=%d, is_tx=0x%x, channel=%d\n", 198 dev_dbg(musb->controller, "hw_ep=%d, is_tx=0x%x, channel=%d\n",
196 hw_ep->epnum, is_tx, ch_num); 199 hw_ep->epnum, is_tx, ch_num);
197 200
198 return &(ux500_channel->channel); 201 return &(ux500_channel->channel);
@@ -201,8 +204,9 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
201static void ux500_dma_channel_release(struct dma_channel *channel) 204static void ux500_dma_channel_release(struct dma_channel *channel)
202{ 205{
203 struct ux500_dma_channel *ux500_channel = channel->private_data; 206 struct ux500_dma_channel *ux500_channel = channel->private_data;
207 struct musb *musb = ux500_channel->controller->private_data;
204 208
205 DBG(7, "channel=%d\n", ux500_channel->ch_num); 209 dev_dbg(musb->controller, "channel=%d\n", ux500_channel->ch_num);
206 210
207 if (ux500_channel->is_allocated) { 211 if (ux500_channel->is_allocated) {
208 ux500_channel->is_allocated = 0; 212 ux500_channel->is_allocated = 0;
@@ -252,8 +256,8 @@ static int ux500_dma_channel_abort(struct dma_channel *channel)
252 void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs; 256 void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs;
253 u16 csr; 257 u16 csr;
254 258
255 DBG(4, "channel=%d, is_tx=%d\n", ux500_channel->ch_num, 259 dev_dbg(musb->controller, "channel=%d, is_tx=%d\n",
256 ux500_channel->is_tx); 260 ux500_channel->ch_num, ux500_channel->is_tx);
257 261
258 if (channel->status == MUSB_DMA_STATUS_BUSY) { 262 if (channel->status == MUSB_DMA_STATUS_BUSY) {
259 if (ux500_channel->is_tx) { 263 if (ux500_channel->is_tx) {
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index ba79dbf5adbc..cb2d451d511e 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -14,6 +14,7 @@
14 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 14 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
15 * 15 *
16 */ 16 */
17#include <linux/dma-mapping.h>
17#include <linux/io.h> 18#include <linux/io.h>
18#include <linux/module.h> 19#include <linux/module.h>
19#include <linux/platform_device.h> 20#include <linux/platform_device.h>
@@ -76,7 +77,7 @@ struct usbhsg_recip_handle {
76 struct usbhsg_gpriv, mod) 77 struct usbhsg_gpriv, mod)
77 78
78#define __usbhsg_for_each_uep(start, pos, g, i) \ 79#define __usbhsg_for_each_uep(start, pos, g, i) \
79 for (i = start, pos = (g)->uep; \ 80 for (i = start, pos = (g)->uep + i; \
80 i < (g)->uep_size; \ 81 i < (g)->uep_size; \
81 i++, pos = (g)->uep + i) 82 i++, pos = (g)->uep + i)
82 83
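The one-token fix to __usbhsg_for_each_uep matters: the old initialiser set pos to (g)->uep regardless of start, so a walk requested from endpoint 1 still touched uep[0] on its first pass and skipped uep[1] entirely. A userspace reproduction with hypothetical ep/group types:

#include <stdio.h>

struct ep    { int num; };
struct group { struct ep uep[4]; int uep_size; };

/* Old form: the first iteration always lands on uep[0]. */
#define for_each_uep_old(start, pos, g, i)      \
        for (i = start, pos = (g)->uep;         \
             i < (g)->uep_size;                 \
             i++, pos = (g)->uep + i)

/* Fixed form from the hunk above: honour the requested start index. */
#define for_each_uep_new(start, pos, g, i)      \
        for (i = start, pos = (g)->uep + i;     \
             i < (g)->uep_size;                 \
             i++, pos = (g)->uep + i)

int main(void)
{
        struct group g = { { { 0 }, { 1 }, { 2 }, { 3 } }, 4 };
        struct ep *pos;
        int i;

        for_each_uep_old(1, pos, &g, i)
                printf("old: ep%d\n", pos->num);   /* ep0, ep2, ep3: ep0 by mistake */
        for_each_uep_new(1, pos, &g, i)
                printf("new: ep%d\n", pos->num);   /* ep1, ep2, ep3 */
        return 0;
}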
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 2e06b90aa1f8..5fc13e717911 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -101,6 +101,7 @@ static int ftdi_jtag_probe(struct usb_serial *serial);
101static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); 101static int ftdi_mtxorb_hack_setup(struct usb_serial *serial);
102static int ftdi_NDI_device_setup(struct usb_serial *serial); 102static int ftdi_NDI_device_setup(struct usb_serial *serial);
103static int ftdi_stmclite_probe(struct usb_serial *serial); 103static int ftdi_stmclite_probe(struct usb_serial *serial);
104static int ftdi_8u2232c_probe(struct usb_serial *serial);
104static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); 105static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
105static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); 106static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
106 107
@@ -128,6 +129,10 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
128 .probe = ftdi_stmclite_probe, 129 .probe = ftdi_stmclite_probe,
129}; 130};
130 131
132static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
133 .probe = ftdi_8u2232c_probe,
134};
135
131/* 136/*
132 * The 8U232AM has the same API as the sio except for: 137 * The 8U232AM has the same API as the sio except for:
133 * - it can support MUCH higher baudrates; up to: 138 * - it can support MUCH higher baudrates; up to:
@@ -151,6 +156,7 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
151 * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! 156 * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
152 */ 157 */
153static struct usb_device_id id_table_combined [] = { 158static struct usb_device_id id_table_combined [] = {
159 { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
154 { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, 160 { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
155 { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, 161 { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
156 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, 162 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
@@ -177,7 +183,8 @@ static struct usb_device_id id_table_combined [] = {
177 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) }, 183 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) },
178 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) }, 184 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) },
179 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) }, 185 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
180 { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) }, 186 { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) ,
187 .driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk },
181 { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) }, 188 { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
182 { USB_DEVICE(FTDI_VID, FTDI_232H_PID) }, 189 { USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
183 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, 190 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
@@ -1171,7 +1178,7 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
1171 case FT2232H: /* FT2232H chip */ 1178 case FT2232H: /* FT2232H chip */
1172 case FT4232H: /* FT4232H chip */ 1179 case FT4232H: /* FT4232H chip */
1173 case FT232H: /* FT232H chip */ 1180 case FT232H: /* FT232H chip */
1174 if ((baud <= 12000000) & (baud >= 1200)) { 1181 if ((baud <= 12000000) && (baud >= 1200)) {
1175 div_value = ftdi_2232h_baud_to_divisor(baud); 1182 div_value = ftdi_2232h_baud_to_divisor(baud);
1176 } else if (baud < 1200) { 1183 } else if (baud < 1200) {
1177 div_value = ftdi_232bm_baud_to_divisor(baud); 1184 div_value = ftdi_232bm_baud_to_divisor(baud);
@@ -1205,7 +1212,10 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
1205 urb_index_value = get_ftdi_divisor(tty, port); 1212 urb_index_value = get_ftdi_divisor(tty, port);
1206 urb_value = (__u16)urb_index_value; 1213 urb_value = (__u16)urb_index_value;
1207 urb_index = (__u16)(urb_index_value >> 16); 1214 urb_index = (__u16)(urb_index_value >> 16);
1208 if (priv->interface) { /* FT2232C */ 1215 if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) ||
1216 (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) {
1217 /* Probably the BM type needs the MSB of the encoded fractional
1218 * divider also moved like for the chips above. Any infos? */
1209 urb_index = (__u16)((urb_index << 8) | priv->interface); 1219 urb_index = (__u16)((urb_index << 8) | priv->interface);
1210 } 1220 }
1211 1221
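Two related baud-rate fixes above: get_ftdi_divisor() now uses the intended logical && (the bitwise & gave the same 0/1 result here but hid the intent), and change_speed() keys the wIndex layout off the chip type rather than priv->interface, so every multi-interface part (FT2232C/H, FT4232H, FT232H) gets the high divisor bits shifted up a byte with the interface number in the low byte. A sketch of that packing (the enum values are placeholders; interface 1 is channel A in the driver's 1-based numbering):

#include <stdint.h>
#include <stdio.h>

enum chip { FT232BM, FT2232C, FT2232H, FT4232H, FT232H };   /* placeholder values */

static uint16_t pack_windex(uint32_t divisor, enum chip type, uint8_t interface)
{
        uint16_t index = (uint16_t)(divisor >> 16);          /* high divisor bits */

        if (type == FT2232C || type == FT2232H ||
            type == FT4232H || type == FT232H)
                index = (uint16_t)((index << 8) | interface);
        return index;
}

int main(void)
{
        /* FT2232H, channel A: the encoded fractional bits move up a byte and
         * the interface number takes the low byte of wIndex. */
        printf("0x%04x\n", pack_windex(0x00020000, FT2232H, 1));   /* 0x0201 */
        return 0;
}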
@@ -1733,6 +1743,18 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
1733 return 0; 1743 return 0;
1734} 1744}
1735 1745
1746static int ftdi_8u2232c_probe(struct usb_serial *serial)
1747{
1748 struct usb_device *udev = serial->dev;
1749
1750 dbg("%s", __func__);
1751
1752 if (strcmp(udev->manufacturer, "CALAO Systems") == 0)
1753 return ftdi_jtag_probe(serial);
1754
1755 return 0;
1756}
1757
1736/* 1758/*
1737 * First and second port on STMCLiteadaptors is reserved for JTAG interface 1759 * First and second port on STMCLiteadaptors is reserved for JTAG interface
1738 * and the forth port for pio 1760 * and the forth port for pio
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 19156d1049fe..bf5227ad3ef7 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1159,4 +1159,8 @@
1159/* USB-Nano-485*/ 1159/* USB-Nano-485*/
1160#define FTDI_CTI_NANO_PID 0xF60B 1160#define FTDI_CTI_NANO_PID 0xF60B
1161 1161
1162 1162/*
1163 * ZeitControl cardsystems GmbH rfid-readers http://zeitconrol.de
1164 */
1165/* TagTracer MIFARE*/
1166#define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID 0xF7C0
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 60b25d8ea0e2..fe22e90bc879 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -148,6 +148,12 @@ static void option_instat_callback(struct urb *urb);
148#define HUAWEI_PRODUCT_K4505 0x1464 148#define HUAWEI_PRODUCT_K4505 0x1464
149#define HUAWEI_PRODUCT_K3765 0x1465 149#define HUAWEI_PRODUCT_K3765 0x1465
150#define HUAWEI_PRODUCT_E14AC 0x14AC 150#define HUAWEI_PRODUCT_E14AC 0x14AC
151#define HUAWEI_PRODUCT_K3806 0x14AE
152#define HUAWEI_PRODUCT_K4605 0x14C6
153#define HUAWEI_PRODUCT_K3770 0x14C9
154#define HUAWEI_PRODUCT_K3771 0x14CA
155#define HUAWEI_PRODUCT_K4510 0x14CB
156#define HUAWEI_PRODUCT_K4511 0x14CC
151#define HUAWEI_PRODUCT_ETS1220 0x1803 157#define HUAWEI_PRODUCT_ETS1220 0x1803
152#define HUAWEI_PRODUCT_E353 0x1506 158#define HUAWEI_PRODUCT_E353 0x1506
153 159
@@ -412,6 +418,56 @@ static void option_instat_callback(struct urb *urb);
412#define SAMSUNG_VENDOR_ID 0x04e8 418#define SAMSUNG_VENDOR_ID 0x04e8
413#define SAMSUNG_PRODUCT_GT_B3730 0x6889 419#define SAMSUNG_PRODUCT_GT_B3730 0x6889
414 420
421/* YUGA products www.yuga-info.com*/
422#define YUGA_VENDOR_ID 0x257A
423#define YUGA_PRODUCT_CEM600 0x1601
424#define YUGA_PRODUCT_CEM610 0x1602
425#define YUGA_PRODUCT_CEM500 0x1603
426#define YUGA_PRODUCT_CEM510 0x1604
427#define YUGA_PRODUCT_CEM800 0x1605
428#define YUGA_PRODUCT_CEM900 0x1606
429
430#define YUGA_PRODUCT_CEU818 0x1607
431#define YUGA_PRODUCT_CEU816 0x1608
432#define YUGA_PRODUCT_CEU828 0x1609
433#define YUGA_PRODUCT_CEU826 0x160A
434#define YUGA_PRODUCT_CEU518 0x160B
435#define YUGA_PRODUCT_CEU516 0x160C
436#define YUGA_PRODUCT_CEU528 0x160D
437#define YUGA_PRODUCT_CEU526 0x160F
438
439#define YUGA_PRODUCT_CWM600 0x2601
440#define YUGA_PRODUCT_CWM610 0x2602
441#define YUGA_PRODUCT_CWM500 0x2603
442#define YUGA_PRODUCT_CWM510 0x2604
443#define YUGA_PRODUCT_CWM800 0x2605
444#define YUGA_PRODUCT_CWM900 0x2606
445
446#define YUGA_PRODUCT_CWU718 0x2607
447#define YUGA_PRODUCT_CWU716 0x2608
448#define YUGA_PRODUCT_CWU728 0x2609
449#define YUGA_PRODUCT_CWU726 0x260A
450#define YUGA_PRODUCT_CWU518 0x260B
451#define YUGA_PRODUCT_CWU516 0x260C
452#define YUGA_PRODUCT_CWU528 0x260D
453#define YUGA_PRODUCT_CWU526 0x260F
454
455#define YUGA_PRODUCT_CLM600 0x2601
456#define YUGA_PRODUCT_CLM610 0x2602
457#define YUGA_PRODUCT_CLM500 0x2603
458#define YUGA_PRODUCT_CLM510 0x2604
459#define YUGA_PRODUCT_CLM800 0x2605
460#define YUGA_PRODUCT_CLM900 0x2606
461
462#define YUGA_PRODUCT_CLU718 0x2607
463#define YUGA_PRODUCT_CLU716 0x2608
464#define YUGA_PRODUCT_CLU728 0x2609
465#define YUGA_PRODUCT_CLU726 0x260A
466#define YUGA_PRODUCT_CLU518 0x260B
467#define YUGA_PRODUCT_CLU516 0x260C
468#define YUGA_PRODUCT_CLU528 0x260D
469#define YUGA_PRODUCT_CLU526 0x260F
470
415/* some devices interfaces need special handling due to a number of reasons */ 471/* some devices interfaces need special handling due to a number of reasons */
416enum option_blacklist_reason { 472enum option_blacklist_reason {
417 OPTION_BLACKLIST_NONE = 0, 473 OPTION_BLACKLIST_NONE = 0,
@@ -547,6 +603,16 @@ static const struct usb_device_id option_ids[] = {
547 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, 603 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
548 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, 604 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
549 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, 605 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
606 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
607 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff) },
608 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
609 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
610 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
611 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) },
612 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) },
613 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) },
614 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
615 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
550 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, 616 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
551 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, 617 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
552 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, 618 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -993,6 +1059,48 @@ static const struct usb_device_id option_ids[] = {
993 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 1059 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
994 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ 1060 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
995 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ 1061 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1062 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
1063 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
1064 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) },
1065 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM510) },
1066 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM800) },
1067 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM900) },
1068 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU818) },
1069 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU816) },
1070 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU828) },
1071 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU826) },
1072 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU518) },
1073 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU516) },
1074 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU528) },
1075 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU526) },
1076 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM600) },
1077 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM610) },
1078 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM500) },
1079 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM510) },
1080 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM800) },
1081 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM900) },
1082 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU718) },
1083 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU716) },
1084 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU728) },
1085 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU726) },
1086 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU518) },
1087 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU516) },
1088 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU528) },
1089 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU526) },
1090 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM600) },
1091 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM610) },
1092 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM500) },
1093 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM510) },
1094 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM800) },
1095 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM900) },
1096 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU718) },
1097 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU716) },
1098 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU728) },
1099 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU726) },
1100 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU518) },
1101 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
1102 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
1103 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
996 { } /* Terminating entry */ 1104 { } /* Terminating entry */
997}; 1105};
998MODULE_DEVICE_TABLE(usb, option_ids); 1106MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1122,11 +1230,13 @@ static int option_probe(struct usb_serial *serial,
1122 serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff) 1230 serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
1123 return -ENODEV; 1231 return -ENODEV;
1124 1232
1125 /* Don't bind network interfaces on Huawei K3765 & K4505 */ 1233 /* Don't bind network interfaces on Huawei K3765, K4505 & K4605 */
1126 if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID && 1234 if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&
1127 (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 || 1235 (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 ||
1128 serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) && 1236 serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505 ||
1129 serial->interface->cur_altsetting->desc.bInterfaceNumber == 1) 1237 serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4605) &&
1238 (serial->interface->cur_altsetting->desc.bInterfaceNumber == 1 ||
1239 serial->interface->cur_altsetting->desc.bInterfaceNumber == 2))
1130 return -ENODEV; 1240 return -ENODEV;
1131 1241
1132 /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */ 1242 /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */
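The probe filter above now also refuses interfaces 1 and 2 on the Huawei K3765, K4505, and the newly added K4605, leaving them to the network driver rather than binding them as TTYs. A condensed userspace version of the check (vendor and product IDs as in the table earlier in this diff):

#include <stdbool.h>
#include <stdio.h>

#define HUAWEI_VENDOR_ID      0x12d1
#define HUAWEI_PRODUCT_K3765  0x1465
#define HUAWEI_PRODUCT_K4505  0x1464
#define HUAWEI_PRODUCT_K4605  0x14C6

/* True when the interface belongs to the modem's network function. */
static bool is_huawei_net_interface(unsigned int vid, unsigned int pid,
                                    unsigned int ifnum)
{
        if (vid != HUAWEI_VENDOR_ID)
                return false;
        if (pid != HUAWEI_PRODUCT_K3765 && pid != HUAWEI_PRODUCT_K4505 &&
            pid != HUAWEI_PRODUCT_K4605)
                return false;
        return ifnum == 1 || ifnum == 2;
}

int main(void)
{
        printf("%d %d\n",
               is_huawei_net_interface(0x12d1, 0x14C6, 2),   /* 1: skip, not a TTY */
               is_huawei_net_interface(0x12d1, 0x14C6, 0));  /* 0: bind serial */
        return 0;
}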
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 54a9dab1f33b..aeccc7f0a93c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -45,6 +45,7 @@ static const struct usb_device_id id_table[] = {
45 {USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ 45 {USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
46 {USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ 46 {USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
47 {USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */ 47 {USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */
48 {USB_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
48 {USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */ 49 {USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */
49 {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ 50 {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
50 {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ 51 {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
@@ -78,6 +79,7 @@ static const struct usb_device_id id_table[] = {
78 {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 79 {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
79 {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 80 {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
80 {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 81 {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
82 {USB_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
81 {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */ 83 {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
82 {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ 84 {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
83 {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */ 85 {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ccff3483eebc..3041a974faf3 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1988,6 +1988,16 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100,
1988 "Micro Mini 1GB", 1988 "Micro Mini 1GB",
1989 USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ), 1989 USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
1990 1990
1991/*
1992 * Nick Bowler <nbowler@elliptictech.com>
1993 * SCSI stack spams (otherwise harmless) error messages.
1994 */
1995UNUSUAL_DEV( 0xc251, 0x4003, 0x0100, 0x0100,
1996 "Keil Software, Inc.",
1997 "V2M MotherBoard",
1998 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1999 US_FL_NOT_LOCKABLE),
2000
1991/* Reported by Andrew Simmons <andrew.simmons@gmail.com> */ 2001/* Reported by Andrew Simmons <andrew.simmons@gmail.com> */
1992UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001, 2002UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001,
1993 "DataStor", 2003 "DataStor",
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 817ab60f7537..dda920623c6a 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -21,7 +21,7 @@
21 21
22#include <mach/board.h> 22#include <mach/board.h>
23#include <mach/cpu.h> 23#include <mach/cpu.h>
24#include <mach/gpio.h> 24#include <asm/gpio.h>
25 25
26#include <video/atmel_lcdc.h> 26#include <video/atmel_lcdc.h>
27 27
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 05a8832bb3eb..d06886a2bfb5 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -1009,4 +1009,4 @@ module_exit(adp8870_exit);
1009MODULE_LICENSE("GPL v2"); 1009MODULE_LICENSE("GPL v2");
1010MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 1010MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
1011MODULE_DESCRIPTION("ADP8870 Backlight driver"); 1011MODULE_DESCRIPTION("ADP8870 Backlight driver");
1012MODULE_ALIAS("platform:adp8870-backlight"); 1012MODULE_ALIAS("i2c:adp8870-backlight");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 80d292fb92d8..7363c1b169e8 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -19,7 +19,7 @@
19#include <asm/backlight.h> 19#include <asm/backlight.h>
20#endif 20#endif
21 21
22static const char const *backlight_types[] = { 22static const char *const backlight_types[] = {
23 [BACKLIGHT_RAW] = "raw", 23 [BACKLIGHT_RAW] = "raw",
24 [BACKLIGHT_PLATFORM] = "platform", 24 [BACKLIGHT_PLATFORM] = "platform",
25 [BACKLIGHT_FIRMWARE] = "firmware", 25 [BACKLIGHT_FIRMWARE] = "firmware",
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index 9f1e389d51d2..b0582917f0c8 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -11,7 +11,7 @@
11 * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors. 11 * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors.
12 */ 12 */
13 13
14 14#include <linux/module.h>
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/fb.h> 17#include <linux/fb.h>
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index b8f38ec6eb18..8b5b2a4124c7 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -28,6 +28,8 @@ struct pwm_bl_data {
28 unsigned int lth_brightness; 28 unsigned int lth_brightness;
29 int (*notify)(struct device *, 29 int (*notify)(struct device *,
30 int brightness); 30 int brightness);
31 void (*notify_after)(struct device *,
32 int brightness);
31 int (*check_fb)(struct device *, struct fb_info *); 33 int (*check_fb)(struct device *, struct fb_info *);
32}; 34};
33 35
@@ -55,6 +57,10 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
55 pwm_config(pb->pwm, brightness, pb->period); 57 pwm_config(pb->pwm, brightness, pb->period);
56 pwm_enable(pb->pwm); 58 pwm_enable(pb->pwm);
57 } 59 }
60
61 if (pb->notify_after)
62 pb->notify_after(pb->dev, brightness);
63
58 return 0; 64 return 0;
59} 65}
60 66
@@ -105,6 +111,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
105 111
106 pb->period = data->pwm_period_ns; 112 pb->period = data->pwm_period_ns;
107 pb->notify = data->notify; 113 pb->notify = data->notify;
114 pb->notify_after = data->notify_after;
108 pb->check_fb = data->check_fb; 115 pb->check_fb = data->check_fb;
109 pb->lth_brightness = data->lth_brightness * 116 pb->lth_brightness = data->lth_brightness *
110 (data->pwm_period_ns / data->max_brightness); 117 (data->pwm_period_ns / data->max_brightness);
@@ -172,6 +179,8 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
172 pb->notify(pb->dev, 0); 179 pb->notify(pb->dev, 0);
173 pwm_config(pb->pwm, 0, pb->period); 180 pwm_config(pb->pwm, 0, pb->period);
174 pwm_disable(pb->pwm); 181 pwm_disable(pb->pwm);
182 if (pb->notify_after)
183 pb->notify_after(pb->dev, 0);
175 return 0; 184 return 0;
176} 185}
177 186
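pwm_bl gains a notify_after() hook that fires after the PWM has been reprogrammed (the existing notify() runs before), including on the suspend path. A hedged board-file sketch of how platform data might wire both callbacks; it assumes the <linux/pwm_backlight.h> of this era exposes struct platform_pwm_backlight_data with these fields and signatures, and the GPIO number, period, and brightness values are hypothetical (the GPIO would be requested elsewhere, e.g. in an .init() hook):

#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/pwm_backlight.h>

#define LCD_BL_EN_GPIO 42                       /* hypothetical enable GPIO */

static int board_bl_notify(struct device *dev, int brightness)
{
        if (brightness == 0)
                gpio_set_value(LCD_BL_EN_GPIO, 0);   /* gate off before PWM stops */
        return brightness;                           /* value actually programmed */
}

static void board_bl_notify_after(struct device *dev, int brightness)
{
        if (brightness)
                gpio_set_value(LCD_BL_EN_GPIO, 1);   /* enable once duty cycle is set */
}

static struct platform_pwm_backlight_data board_backlight_data = {
        .pwm_id         = 0,
        .max_brightness = 255,
        .dft_brightness = 200,
        .pwm_period_ns  = 1000000,              /* 1 kHz, hypothetical panel */
        .notify         = board_bl_notify,
        .notify_after   = board_bl_notify_after,
};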
diff --git a/drivers/video/omap/lcd_apollon.c b/drivers/video/omap/lcd_apollon.c
index 10459d8bd9a0..4b24f549f9b9 100644
--- a/drivers/video/omap/lcd_apollon.c
+++ b/drivers/video/omap/lcd_apollon.c
@@ -24,7 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26 26
27#include <mach/gpio.h> 27#include <asm/gpio.h>
28 28
29#include "omapfb.h" 29#include "omapfb.h"
30 30
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
index 8df688748b5a..622ad839fd9d 100644
--- a/drivers/video/omap/lcd_h3.c
+++ b/drivers/video/omap/lcd_h3.c
@@ -23,7 +23,7 @@
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/i2c/tps65010.h> 24#include <linux/i2c/tps65010.h>
25 25
26#include <mach/gpio.h> 26#include <asm/gpio.h>
27#include "omapfb.h" 27#include "omapfb.h"
28 28
29#define MODULE_NAME "omapfb-lcd_h3" 29#define MODULE_NAME "omapfb-lcd_h3"
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
index 9fff86f67bde..12cc52a70f96 100644
--- a/drivers/video/omap/lcd_inn1610.c
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -22,7 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24 24
25#include <mach/gpio.h> 25#include <asm/gpio.h>
26#include "omapfb.h" 26#include "omapfb.h"
27 27
28#define MODULE_NAME "omapfb-lcd_h3" 28#define MODULE_NAME "omapfb-lcd_h3"
diff --git a/drivers/video/omap/lcd_ldp.c b/drivers/video/omap/lcd_ldp.c
index 0f5952cae85e..062466402c0e 100644
--- a/drivers/video/omap/lcd_ldp.c
+++ b/drivers/video/omap/lcd_ldp.c
@@ -26,7 +26,7 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/i2c/twl.h> 27#include <linux/i2c/twl.h>
28 28
29#include <mach/gpio.h> 29#include <asm/gpio.h>
30#include <plat/mux.h> 30#include <plat/mux.h>
31#include <asm/mach-types.h> 31#include <asm/mach-types.h>
32 32
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
index b87e8b83f29c..6f8d13c41202 100644
--- a/drivers/video/omap/lcd_osk.c
+++ b/drivers/video/omap/lcd_osk.c
@@ -23,7 +23,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
-#include <mach/gpio.h>
+#include <asm/gpio.h>
 #include <plat/mux.h>
 #include "omapfb.h"
 
diff --git a/drivers/video/omap/lcd_overo.c b/drivers/video/omap/lcd_overo.c
index 564933ffac6e..b8fd5b2ec29c 100644
--- a/drivers/video/omap/lcd_overo.c
+++ b/drivers/video/omap/lcd_overo.c
@@ -23,7 +23,7 @@
 #include <linux/platform_device.h>
 #include <linux/i2c/twl.h>
 
-#include <mach/gpio.h>
+#include <asm/gpio.h>
 #include <plat/mux.h>
 #include <asm/mach-types.h>
 
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
index ff0e6d7ab3a2..b51b332e5a2b 100644
--- a/drivers/video/omap/lcd_palmtt.c
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -29,7 +29,7 @@ GPIO13 - screen blanking
 #include <linux/module.h>
 #include <linux/io.h>
 
-#include <mach/gpio.h>
+#include <asm/gpio.h>
 #include "omapfb.h"
 
 static int palmtt_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/pnx4008/sdum.c b/drivers/video/pnx4008/sdum.c
index 5ec4f2d439c9..50e00395240f 100644
--- a/drivers/video/pnx4008/sdum.c
+++ b/drivers/video/pnx4008/sdum.c
@@ -30,7 +30,7 @@
 #include <linux/clk.h>
 #include <linux/gfp.h>
 #include <asm/uaccess.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
 
 #include "sdum.h"
 #include "fbcommon.h"
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 02bf7bf7160b..b5abaae38e97 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1,7 +1,7 @@
 /*
  * dscore.c
  *
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
@@ -1024,5 +1024,5 @@ module_init(ds_init);
 module_exit(ds_fini);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)");
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 334d1ccf9c92..f667c26b2195 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -1,7 +1,7 @@
 /*
  * matrox_w1.c
  *
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
@@ -39,7 +39,7 @@
 #include "../w1_log.h"
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio).");
 
 static struct pci_device_id matrox_w1_tbl[] = {
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index c37781899d90..7c8cdb8aed26 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -373,7 +373,7 @@ static int w1_f29_add_slave(struct w1_slave *sl)
 static void w1_f29_remove_slave(struct w1_slave *sl)
 {
 	int i;
-	for (i = NB_SYSFS_BIN_FILES; i <= 0; --i)
+	for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
 		sysfs_remove_bin_file(&sl->dev.kobj,
 			&(w1_f29_sysfs_bin_files[i]));
 }
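The one-line change above fixes reversed loop bounds in the teardown path: with the old condition the body never executed, because the counter started at NB_SYSFS_BIN_FILES and the loop only continued while i <= 0, so the sysfs binary files were never removed. A standalone sketch (plain C, not kernel code) showing the two bounds side by side:

/* Illustration only: NB_FILES stands in for NB_SYSFS_BIN_FILES. */
#include <stdio.h>

#define NB_FILES 3

int main(void)
{
	int i;

	for (i = NB_FILES; i <= 0; --i)		/* old bounds: body never runs */
		printf("old loop: %d\n", i);

	for (i = NB_FILES - 1; i >= 0; --i)	/* fixed bounds: visits 2, 1, 0 */
		printf("new loop: %d\n", i);

	return 0;
}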
diff --git a/drivers/w1/slaves/w1_smem.c b/drivers/w1/slaves/w1_smem.c
index cc8c02e92593..84655625c870 100644
--- a/drivers/w1/slaves/w1_smem.c
+++ b/drivers/w1/slaves/w1_smem.c
@@ -1,7 +1,7 @@
 /*
  * w1_smem.c
  *
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
@@ -32,7 +32,7 @@
 #include "../w1_family.h"
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family.");
 
 static struct w1_family w1_smem_family_01 = {
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 402928b135d1..a1ef9b5b38cf 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -1,7 +1,7 @@
 /*
  * w1_therm.c
  *
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
@@ -34,7 +34,7 @@
 #include "../w1_family.h"
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family.");
 
 /* Allow the strong pullup to be disabled, but default to enabled.
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 6c136c19e982..c37497823851 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -1,7 +1,7 @@
 /*
  * w1.c
  *
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
@@ -42,7 +42,7 @@
 #include "w1_netlink.h"
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
 
 static int w1_timeout = 10;
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index 1ce23fc6186c..4d012ca3f32c 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -1,7 +1,7 @@
 /*
  * w1.h
  *
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index 4a099041f28a..63359797c8b1 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -1,7 +1,7 @@
 /*
  * w1_family.c
  *
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 98a1ac0f4693..490cda2281bc 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -1,7 +1,7 @@
 /*
  * w1_family.h
  *
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index b50be3f1073d..d220bce2cee4 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -1,7 +1,7 @@
 /*
  * w1_int.c
  *
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_int.h b/drivers/w1/w1_int.h
index 4274082d2262..2ad7d4414bed 100644
--- a/drivers/w1/w1_int.h
+++ b/drivers/w1/w1_int.h
@@ -1,7 +1,7 @@
 /*
  * w1_int.h
  *
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 8e8b64cfafb6..765b37b62a4f 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -1,7 +1,7 @@
 /*
  * w1_io.c
  *
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h
index e6ab7cf08f88..9c7bd62e6bdc 100644
--- a/drivers/w1/w1_log.h
+++ b/drivers/w1/w1_log.h
@@ -1,7 +1,7 @@
 /*
  * w1_log.h
  *
- * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 55aabd927c60..40788c925d1c 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -1,7 +1,7 @@
 /*
  * w1_netlink.c
  *
- * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index 27e950f935b1..b0922dc29658 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -1,7 +1,7 @@
 /*
  * w1_netlink.h
  *
- * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
  *
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 410fba45378d..809cbda03d7a 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -494,15 +494,16 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
 	asminline_call(&cmn_regs, cru_rom_addr);
 	die_nmi_called = 1;
 	spin_unlock_irqrestore(&rom_lock, rom_pl);
+
+	if (allow_kdump)
+		hpwdt_stop();
+
 	if (!is_icru) {
 		if (cmn_regs.u1.ral == 0) {
-			printk(KERN_WARNING "hpwdt: An NMI occurred, "
-				"but unable to determine source.\n");
+			panic("An NMI occurred, "
+				"but unable to determine source.\n");
 		}
 	}
-
-	if (allow_kdump)
-		hpwdt_stop();
 	panic("An NMI occurred, please see the Integrated "
 		"Management Log for details.\n");
 
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 7d82adac1cb2..102aed0efbf1 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -51,16 +51,16 @@ static int ltq_wdt_ok_to_close;
 static void
 ltq_wdt_enable(void)
 {
-	ltq_wdt_timeout = ltq_wdt_timeout *
+	unsigned long int timeout = ltq_wdt_timeout *
 		(ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000;
-	if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT)
-		ltq_wdt_timeout = LTQ_MAX_TIMEOUT;
+	if (timeout > LTQ_MAX_TIMEOUT)
+		timeout = LTQ_MAX_TIMEOUT;
 
 	/* write the first password magic */
 	ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
 	/* write the second magic plus the configuration and new timeout */
 	ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV |
-		LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR);
+		LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR);
 }
 
 static void
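The change above stops ltq_wdt_enable() from overwriting the module-wide ltq_wdt_timeout, which is kept in seconds, with the scaled hardware value; re-enabling the watchdog therefore no longer re-scales an already-scaled number. A standalone sketch of the scaling and clamping (plain C, not kernel code); the clock rate, divider, and maximum are made-up stand-ins for the driver's constants:

#include <stdio.h>

#define WDT_DIVIDER	0x40000			/* illustrative divider */
#define WDT_MAX_TIMEOUT	((1 << 16) - 1)		/* illustrative field width */

static unsigned long wdt_timeout_s = 30;		/* stays in seconds */
static unsigned long io_region_clk_rate = 133000000;	/* hypothetical 133 MHz */

int main(void)
{
	/* scale into a local, as the patched driver does */
	unsigned long timeout = wdt_timeout_s *
		(io_region_clk_rate / WDT_DIVIDER) + 0x1000;

	if (timeout > WDT_MAX_TIMEOUT)
		timeout = WDT_MAX_TIMEOUT;

	printf("seconds=%lu hardware ticks=%lu\n", wdt_timeout_s, timeout);
	return 0;
}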
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index 3066a5127ca8..eaca366b7234 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -173,7 +173,7 @@ static struct notifier_block epx_c3_notifier = {
 	.notifier_call = epx_c3_notify_sys,
 };
 
-static const char banner[] __initdata = KERN_INFO PFX
+static const char banner[] __initconst = KERN_INFO PFX
 	"Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n";
 
 static int __init watchdog_init(void)
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index d33520d0b4c9..1199da0f98cf 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -59,7 +59,7 @@ static struct watchdog_device *wdd;
 
 static int watchdog_ping(struct watchdog_device *wddev)
 {
-	if (test_bit(WDOG_ACTIVE, &wdd->status)) {
+	if (test_bit(WDOG_ACTIVE, &wddev->status)) {
 		if (wddev->ops->ping)
 			return wddev->ops->ping(wddev);	/* ping the watchdog */
 		else
@@ -81,12 +81,12 @@ static int watchdog_start(struct watchdog_device *wddev)
 {
 	int err;
 
-	if (!test_bit(WDOG_ACTIVE, &wdd->status)) {
+	if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
 		err = wddev->ops->start(wddev);
 		if (err < 0)
 			return err;
 
-		set_bit(WDOG_ACTIVE, &wdd->status);
+		set_bit(WDOG_ACTIVE, &wddev->status);
 	}
 	return 0;
 }
@@ -105,18 +105,18 @@ static int watchdog_stop(struct watchdog_device *wddev)
 {
 	int err = -EBUSY;
 
-	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
+	if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
 		pr_info("%s: nowayout prevents watchdog to be stopped!\n",
-			wdd->info->identity);
+			wddev->info->identity);
 		return err;
 	}
 
-	if (test_bit(WDOG_ACTIVE, &wdd->status)) {
+	if (test_bit(WDOG_ACTIVE, &wddev->status)) {
 		err = wddev->ops->stop(wddev);
 		if (err < 0)
 			return err;
 
-		clear_bit(WDOG_ACTIVE, &wdd->status);
+		clear_bit(WDOG_ACTIVE, &wddev->status);
 	}
 	return 0;
 }
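All three hunks above fix the same slip: the helpers take the device to operate on as the wddev argument, but were testing and updating the file-scope wdd pointer instead, so they only ever acted on the single registered device. A standalone illustration of the pattern (plain C, not kernel code; names and fields are invented):

#include <stdio.h>

struct wd { unsigned long status; };

static struct wd *wdd;			/* file-scope device, as in watchdog_dev.c */

static int is_active(struct wd *wddev)
{
	return wddev->status & 1;	/* consult the argument, not the global */
}

int main(void)
{
	struct wd a = { .status = 0 }, b = { .status = 1 };

	wdd = &a;			/* the global points at "a" only */
	printf("a active=%d, b active=%d\n", is_active(&a), is_active(&b));
	return 0;
}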
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index da70f5c32eb9..7523719bf8a4 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -54,7 +54,7 @@
  * This lock protects updates to the following mapping and reference-count
  * arrays. The lock does not need to be acquired to read the mapping tables.
  */
-static DEFINE_SPINLOCK(irq_mapping_update_lock);
+static DEFINE_MUTEX(irq_mapping_update_lock);
 
 static LIST_HEAD(xen_irq_list_head);
 
@@ -631,7 +631,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 	int irq = -1;
 	struct physdev_irq irq_op;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = find_irq_by_gsi(gsi);
 	if (irq != -1) {
@@ -684,7 +684,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 			handle_edge_irq, name);
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -710,7 +710,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 {
 	int irq, ret;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = xen_allocate_irq_dynamic();
 	if (irq == -1)
@@ -724,10 +724,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 	if (ret < 0)
 		goto error_irq;
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 	return irq;
 error_irq:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 	xen_free_irq(irq);
 	return -1;
 }
@@ -740,7 +740,7 @@ int xen_destroy_irq(int irq)
 	struct irq_info *info = info_for_irq(irq);
 	int rc = -ENOENT;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	desc = irq_to_desc(irq);
 	if (!desc)
@@ -766,7 +766,7 @@ int xen_destroy_irq(int irq)
 	xen_free_irq(irq);
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 	return rc;
 }
 
@@ -776,7 +776,7 @@ int xen_irq_from_pirq(unsigned pirq)
 
 	struct irq_info *info;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	list_for_each_entry(info, &xen_irq_list_head, list) {
 		if (info == NULL || info->type != IRQT_PIRQ)
@@ -787,7 +787,7 @@ int xen_irq_from_pirq(unsigned pirq)
 	}
 	irq = -1;
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -802,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 {
 	int irq;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = evtchn_to_irq[evtchn];
 
@@ -818,7 +818,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 	}
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -829,7 +829,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 	struct evtchn_bind_ipi bind_ipi;
 	int evtchn, irq;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = per_cpu(ipi_to_irq, cpu)[ipi];
 
@@ -853,7 +853,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 	}
 
  out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 	return irq;
 }
 
@@ -878,7 +878,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 	struct evtchn_bind_virq bind_virq;
 	int evtchn, irq;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = per_cpu(virq_to_irq, cpu)[virq];
 
@@ -903,7 +903,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 	}
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -913,7 +913,7 @@ static void unbind_from_irq(unsigned int irq)
 	struct evtchn_close close;
 	int evtchn = evtchn_from_irq(irq);
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	if (VALID_EVTCHN(evtchn)) {
 		close.port = evtchn;
@@ -943,7 +943,7 @@ static void unbind_from_irq(unsigned int irq)
 
 	xen_free_irq(irq);
 
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 }
 
 int bind_evtchn_to_irqhandler(unsigned int evtchn,
@@ -1279,7 +1279,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
 	   will also be masked. */
 	disable_irq(irq);
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	/* After resume the irq<->evtchn mappings are all cleared out */
 	BUG_ON(evtchn_to_irq[evtchn] != -1);
@@ -1289,7 +1289,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
 
 	xen_irq_info_evtchn_init(irq, evtchn);
 
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	/* new event channels are always bound to cpu 0 */
 	irq_set_affinity(irq, cpumask_of(0));
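Every hunk in this file performs the same substitution: the lock protecting the irq/evtchn mapping tables changes from a spinlock to a mutex, presumably because code called while it is held, such as xen_allocate_irq_dynamic() in one of the hunks above, can sleep; the read side of the tables remains lockless, as the comment in the first hunk notes. A minimal sketch of the resulting pattern, with a hypothetical helper standing in for the real lookup and allocation logic:

#include <linux/mutex.h>

static DEFINE_MUTEX(irq_mapping_update_lock);

/* hypothetical helper, not part of drivers/xen/events.c */
static int lookup_or_allocate_irq(unsigned int evtchn);

static int example_bind(unsigned int evtchn)
{
	int irq;

	mutex_lock(&irq_mapping_update_lock);	/* may sleep, unlike spin_lock() */
	irq = lookup_or_allocate_irq(evtchn);	/* sleeping under the lock is now legal */
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}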
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 1b4afd81f872..6ea852e25162 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -70,6 +70,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
+#include <linux/module.h>
 #include <linux/workqueue.h>
 #include <xen/balloon.h>
 #include <xen/tmem.h>
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index e0c2807b0970..181fa8158a8b 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -148,10 +148,10 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
 	}
 	platform_set_drvdata(pdev, bus);
 
-	/* Register all devices */
 	pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
 		zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
 
+	/* First identify all devices ... */
 	for (i = 0; i < zorro_num_autocon; i++) {
 		z = &zorro_autocon[i];
 		z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
@@ -172,6 +172,11 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
 		dev_set_name(&z->dev, "%02x", i);
 		z->dev.parent = &bus->dev;
 		z->dev.bus = &zorro_bus_type;
+	}
+
+	/* ... then register them */
+	for (i = 0; i < zorro_num_autocon; i++) {
+		z = &zorro_autocon[i];
 		error = device_register(&z->dev);
 		if (error) {
 			dev_err(&bus->dev, "Error registering device %s\n",