Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/apei/erst.c31
-rw-r--r--drivers/acpi/processor_idle.c29
-rw-r--r--drivers/ata/ahci.c3
-rw-r--r--drivers/ata/ahci_platform.c6
-rw-r--r--drivers/ata/libata-eh.c12
-rw-r--r--drivers/ata/libata-pmp.c7
-rw-r--r--drivers/ata/libata-scsi.c4
-rw-r--r--drivers/ata/libata-sff.c4
-rw-r--r--drivers/ata/pata_of_platform.c2
-rw-r--r--drivers/ata/sata_sis.c2
-rw-r--r--drivers/base/core.c5
-rw-r--r--drivers/base/node.c14
-rw-r--r--drivers/base/power/clock_ops.c3
-rw-r--r--drivers/base/power/main.c3
-rw-r--r--drivers/base/power/opp.c2
-rw-r--r--drivers/base/power/qos.c18
-rw-r--r--drivers/block/cciss.c12
-rw-r--r--drivers/block/cciss_scsi.c1
-rw-r--r--drivers/block/loop.c51
-rw-r--r--drivers/block/paride/pg.c1
-rw-r--r--drivers/block/rbd.c101
-rw-r--r--drivers/block/swim3.c362
-rw-r--r--drivers/bluetooth/Kconfig6
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c15
-rw-r--r--drivers/bluetooth/btusb.c6
-rw-r--r--drivers/char/agp/intel-gtt.c7
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/cpufreq/db8500-cpufreq.c6
-rw-r--r--drivers/crypto/mv_cesa.c12
-rw-r--r--drivers/devfreq/Kconfig41
-rw-r--r--drivers/devfreq/devfreq.c10
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/firmware/dmi_scan.c6
-rw-r--r--drivers/firmware/efivars.c12
-rw-r--r--drivers/firmware/iscsi_ibft.c42
-rw-r--r--drivers/firmware/iscsi_ibft_find.c26
-rw-r--r--drivers/firmware/sigma.c81
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-da9052.c21
-rw-r--r--drivers/gpio/gpio-ml-ioh.c32
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c18
-rw-r--r--drivers/gpio/gpio-omap.c59
-rw-r--r--drivers/gpio/gpio-pca953x.c15
-rw-r--r--drivers/gpio/gpio-pl061.c4
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/drm_crtc.c8
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c40
-rw-r--r--drivers/gpu/drm/drm_debugfs.c12
-rw-r--r--drivers/gpu/drm/drm_drv.c2
-rw-r--r--drivers/gpu/drm/drm_irq.c31
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c62
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h21
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c78
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c76
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h25
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c83
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c66
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c44
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c71
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c89
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h28
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c63
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c10
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c53
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h37
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c19
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h51
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c122
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c546
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c19
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c41
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c60
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c195
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h29
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h31
-rw-r--r--drivers/gpu/drm/radeon/r100.c7
-rw-r--r--drivers/gpu/drm/radeon/r300.c94
-rw-r--r--drivers/gpu/drm/radeon/r600.c118
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon.h53
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c304
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c18
-rw-r--r--drivers/gpu/drm/radeon/rs600.c7
-rw-r--r--drivers/gpu/drm/radeon/rv770.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c25
-rw-r--r--drivers/gpu/vga/vgaarb.c62
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/ad7314.c1
-rw-r--r--drivers/hwmon/ads7871.c1
-rw-r--r--drivers/hwmon/exynos4_tmu.c12
-rw-r--r--drivers/hwmon/gpio-fan.c13
-rw-r--r--drivers/hwmon/jz4740-hwmon.c16
-rw-r--r--drivers/hwmon/ntc_thermistor.c14
-rw-r--r--drivers/hwmon/s3c-hwmon.c13
-rw-r--r--drivers/hwmon/sch5627.c13
-rw-r--r--drivers/hwmon/sch5636.c13
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c14
-rw-r--r--drivers/hwmon/ultra45_env.c13
-rw-r--r--drivers/hwmon/wm831x-hwmon.c12
-rw-r--r--drivers/hwmon/wm8350-hwmon.c12
-rw-r--r--drivers/hwspinlock/u8500_hsem.c7
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c4
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c2
-rw-r--r--drivers/i2c/i2c-core.c4
-rw-r--r--drivers/i2c/i2c-dev.c2
-rw-r--r--drivers/ide/cy82c693.c6
-rw-r--r--drivers/ide/icside.c2
-rw-r--r--drivers/ide/ide-cd.c1
-rw-r--r--drivers/ide/ide-floppy.c1
-rw-r--r--drivers/ide/ide-tape.c1
-rw-r--r--drivers/ide/piix.c18
-rw-r--r--drivers/ide/triflex.c16
-rw-r--r--drivers/infiniband/core/addr.c9
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c13
-rw-r--r--drivers/input/mouse/elantech.c26
-rw-r--r--drivers/input/serio/ams_delta_serio.c1
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h14
-rw-r--r--drivers/iommu/intel-iommu.c7
-rw-r--r--drivers/iommu/intr_remapping.c2
-rw-r--r--drivers/iommu/omap-iommu-debug.c1
-rw-r--r--drivers/iommu/omap-iovmm.c1
-rw-r--r--drivers/isdn/divert/divert_procfs.c6
-rw-r--r--drivers/isdn/i4l/isdn_net.c3
-rw-r--r--drivers/leds/led-class.c5
-rw-r--r--drivers/macintosh/via-macii.c2
-rw-r--r--drivers/macintosh/via-maciisi.c4
-rw-r--r--drivers/md/bitmap.c4
-rw-r--r--drivers/md/md.c27
-rw-r--r--drivers/md/raid5.c24
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-i2c.c3
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-phy.c7
-rw-r--r--drivers/media/video/s5k6aa.c1
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c4
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c4
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c6
-rw-r--r--drivers/media/video/v4l2-ctrls.c5
-rw-r--r--drivers/media/video/v4l2-event.c10
-rw-r--r--drivers/media/video/videobuf2-core.c6
-rw-r--r--drivers/mfd/ab5500-core.c1
-rw-r--r--drivers/mfd/ab5500-debugfs.c1
-rw-r--r--drivers/misc/Kconfig15
-rw-r--r--drivers/misc/ad525x_dpot.h2
-rw-r--r--drivers/misc/carma/carma-fpga-program.c9
-rw-r--r--drivers/misc/carma/carma-fpga.c9
-rw-r--r--drivers/misc/eeprom/Kconfig2
-rw-r--r--drivers/misc/pch_phub.c81
-rw-r--r--drivers/misc/spear13xx_pcie_gadget.c2
-rw-r--r--drivers/mmc/card/block.c8
-rw-r--r--drivers/mmc/core/core.c98
-rw-r--r--drivers/mmc/core/mmc.c12
-rw-r--r--drivers/mmc/host/mxcmmc.c1
-rw-r--r--drivers/mmc/host/omap_hsmmc.c7
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c1
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c8
-rw-r--r--drivers/mmc/host/sdhci-s3c.c2
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c2
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c1
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/arcnet/Kconfig2
-rw-r--r--drivers/net/bonding/bond_main.c33
-rw-r--r--drivers/net/bonding/bond_sysfs.c7
-rw-r--r--drivers/net/can/sja1000/peak_pci.c1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c39
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c50
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c15
-rw-r--r--drivers/net/ethernet/cadence/Kconfig1
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/fec.c11
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c53
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea.h4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c6
-rw-r--r--drivers/net/ethernet/ibm/iseries_veth.c2
-rw-r--r--drivers/net/ethernet/jme.c113
-rw-r--r--drivers/net/ethernet/jme.h19
-rw-r--r--drivers/net/ethernet/lantiq_etop.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c106
-rw-r--r--drivers/net/ethernet/marvell/sky2.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c36
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c2
-rw-r--r--drivers/net/ethernet/pasemi/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h8
-rw-r--r--drivers/net/ethernet/rdc/r6040.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c55
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c131
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/tile/tilepro.c8
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c7
-rw-r--r--drivers/net/hippi/Kconfig2
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/ppp/pptp.c4
-rw-r--r--drivers/net/usb/asix.c68
-rw-r--r--drivers/net/usb/cdc_ether.c2
-rw-r--r--drivers/net/usb/lg-vl600.c25
-rw-r--r--drivers/net/usb/smsc75xx.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/regd.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c15
-rw-r--r--drivers/net/wireless/b43/xmit.h16
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c36
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c34
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-shared.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c33
-rw-r--r--drivers/net/wireless/libertas/cfg.c2
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/mwifiex/scan.c9
-rw-r--r--drivers/net/wireless/p54/p54spi.c5
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c22
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c17
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c2
-rw-r--r--drivers/net/wireless/wl12xx/scan.c2
-rw-r--r--drivers/net/xen-netback/netback.c4
-rw-r--r--drivers/of/irq.c29
-rw-r--r--drivers/oprofile/oprof.c29
-rw-r--r--drivers/oprofile/timer_int.c1
-rw-r--r--drivers/pci/Kconfig1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c29
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c3
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c27
-rw-r--r--drivers/pci/hotplug/shpchp_core.c4
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c4
-rw-r--r--drivers/pinctrl/Kconfig22
-rw-r--r--drivers/platform/x86/Kconfig4
-rw-r--r--drivers/platform/x86/dell-laptop.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c21
-rw-r--r--drivers/power/intel_mid_battery.c12
-rw-r--r--drivers/ps3/ps3-vuart.c2
-rw-r--r--drivers/ps3/ps3stor_lib.c2
-rw-r--r--drivers/ptp/ptp_clock.c4
-rw-r--r--drivers/rapidio/devices/tsi721.c41
-rw-r--r--drivers/rapidio/devices/tsi721.h2
-rw-r--r--drivers/regulator/aat2870-regulator.c2
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/tps65910-regulator.c14
-rw-r--r--drivers/regulator/twl-regulator.c46
-rw-r--r--drivers/rtc/class.c10
-rw-r--r--drivers/rtc/interface.c44
-rw-r--r--drivers/rtc/rtc-mrst.c19
-rw-r--r--drivers/rtc/rtc-puv3.c4
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/cio/chsc.c7
-rw-r--r--drivers/s390/cio/cio.h5
-rw-r--r--drivers/s390/cio/css.c104
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/device_fsm.c30
-rw-r--r--drivers/s390/cio/device_ops.c20
-rw-r--r--drivers/s390/cio/io_sch.h5
-rw-r--r--drivers/s390/crypto/ap_bus.c25
-rw-r--r--drivers/s390/net/Kconfig2
-rw-r--r--drivers/s390/net/lcs.c6
-rw-r--r--drivers/s390/net/netiucv.c2
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c1
-rw-r--r--drivers/s390/net/qeth_l3_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_sys.c4
-rw-r--r--drivers/sbus/char/bbc_i2c.c27
-rw-r--r--drivers/sbus/char/display7seg.c13
-rw-r--r--drivers/sbus/char/envctrl.c12
-rw-r--r--drivers/sbus/char/flash.c12
-rw-r--r--drivers/sbus/char/uctrl.c12
-rw-r--r--drivers/scsi/aacraid/linit.c4
-rw-r--r--drivers/scsi/hpsa.c5
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c5
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/scsi_scan.c6
-rw-r--r--drivers/sh/Makefile8
-rw-r--r--drivers/sh/clk/core.c107
-rw-r--r--drivers/sh/pm_runtime.c65
-rw-r--r--drivers/spi/Kconfig4
-rw-r--r--drivers/spi/spi-ath79.c1
-rw-r--r--drivers/spi/spi-atmel.c5
-rw-r--r--drivers/spi/spi-gpio.c4
-rw-r--r--drivers/spi/spi-nuc900.c3
-rw-r--r--drivers/spi/spi-pl022.c8
-rw-r--r--drivers/ssb/driver_pcicore.c8
-rw-r--r--drivers/staging/comedi/comedi_fops.c96
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c7
-rw-r--r--drivers/staging/et131x/Kconfig3
-rw-r--r--drivers/staging/et131x/et131x.c12
-rw-r--r--drivers/staging/iio/industrialio-core.c25
-rw-r--r--drivers/staging/media/as102/as102_drv.c4
-rw-r--r--drivers/staging/media/as102/as102_drv.h3
-rw-r--r--drivers/staging/octeon/ethernet-tx.c2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c1
-rw-r--r--drivers/staging/rts_pstor/rtsx.c1
-rw-r--r--drivers/staging/slicoss/Kconfig2
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c15
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c4
-rw-r--r--drivers/staging/usbip/vhci_rx.c10
-rw-r--r--drivers/target/iscsi/iscsi_target.c26
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c3
-rw-r--r--drivers/target/loopback/tcm_loop.c41
-rw-r--r--drivers/target/target_core_alua.c27
-rw-r--r--drivers/target/target_core_cdb.c20
-rw-r--r--drivers/target/target_core_configfs.c11
-rw-r--r--drivers/target/target_core_device.c30
-rw-r--r--drivers/target/target_core_file.c20
-rw-r--r--drivers/target/target_core_iblock.c16
-rw-r--r--drivers/target/target_core_pr.c240
-rw-r--r--drivers/target/target_core_pscsi.c28
-rw-r--r--drivers/target/target_core_rd.c258
-rw-r--r--drivers/target/target_core_tmr.c4
-rw-r--r--drivers/target/target_core_transport.c260
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c2
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c3
-rw-r--r--drivers/tty/hvc/hvc_dcc.c2
-rw-r--r--drivers/tty/n_gsm.c12
-rw-r--r--drivers/tty/serial/Kconfig14
-rw-r--r--drivers/tty/serial/atmel_serial.c16
-rw-r--r--drivers/tty/serial/crisv10.c10
-rw-r--r--drivers/tty/serial/mfd.c4
-rw-r--r--drivers/tty/serial/pch_uart.c19
-rw-r--r--drivers/tty/serial/sh-sci.c19
-rw-r--r--drivers/tty/tty_ldisc.c30
-rw-r--r--drivers/usb/class/cdc-acm.c18
-rw-r--r--drivers/usb/core/hub.c6
-rw-r--r--drivers/usb/core/quirks.c27
-rw-r--r--drivers/usb/dwc3/gadget.c1
-rw-r--r--drivers/usb/gadget/Kconfig9
-rw-r--r--drivers/usb/gadget/amd5536udc.c2
-rw-r--r--drivers/usb/gadget/ci13xxx_msm.c2
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c21
-rw-r--r--drivers/usb/gadget/f_mass_storage.c7
-rw-r--r--drivers/usb/gadget/f_midi.c138
-rw-r--r--drivers/usb/gadget/f_phonet.c2
-rw-r--r--drivers/usb/gadget/f_serial.c4
-rw-r--r--drivers/usb/gadget/file_storage.c4
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c3
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c3
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c80
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h10
-rw-r--r--drivers/usb/gadget/inode.c5
-rw-r--r--drivers/usb/gadget/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/net2280.c2
-rw-r--r--drivers/usb/gadget/pch_udc.c10
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c32
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c4
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c3
-rw-r--r--drivers/usb/gadget/udc-core.c10
-rw-r--r--drivers/usb/host/ehci-sched.c22
-rw-r--r--drivers/usb/host/ehci-xls.c2
-rw-r--r--drivers/usb/host/ohci-at91.c6
-rw-r--r--drivers/usb/host/ohci-hcd.c15
-rw-r--r--drivers/usb/host/ohci-pci.c26
-rw-r--r--drivers/usb/host/ohci.h1
-rw-r--r--drivers/usb/host/pci-quirks.c57
-rw-r--r--drivers/usb/host/whci/qset.c2
-rw-r--r--drivers/usb/host/xhci-mem.c5
-rw-r--r--drivers/usb/host/xhci-ring.c13
-rw-r--r--drivers/usb/host/xhci.c39
-rw-r--r--drivers/usb/musb/Kconfig3
-rw-r--r--drivers/usb/musb/am35x.c1
-rw-r--r--drivers/usb/musb/da8xx.c1
-rw-r--r--drivers/usb/musb/musb_core.c9
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/renesas_usbhs/common.c2
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod.c2
-rw-r--r--drivers/usb/renesas_usbhs/mod.h8
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c51
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c64
-rw-r--r--drivers/usb/serial/ark3116.c10
-rw-r--r--drivers/usb/serial/ftdi_sio.c15
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/option.c37
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/storage/ene_ub6250.c3
-rw-r--r--drivers/usb/storage/protocol.c7
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/video/da8xx-fb.c15
-rw-r--r--drivers/video/omap/dispc.c1
-rw-r--r--drivers/video/omap2/dss/dispc.c11
-rw-r--r--drivers/video/omap2/dss/hdmi.c2
-rw-r--r--drivers/video/via/share.h4
-rw-r--r--drivers/virtio/Kconfig2
-rw-r--r--drivers/virtio/virtio_mmio.c2
-rw-r--r--drivers/virtio/virtio_pci.c29
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/adx_wdt.c355
-rw-r--r--drivers/watchdog/s3c2410_wdt.c4
-rw-r--r--drivers/watchdog/wm831x_wdt.c2
-rw-r--r--drivers/xen/balloon.c4
-rw-r--r--drivers/xen/gntalloc.c4
-rw-r--r--drivers/xen/gntdev.c10
-rw-r--r--drivers/xen/swiotlb-xen.c4
-rw-r--r--drivers/xen/xenbus/xenbus_client.c11
472 files changed, 5601 insertions, 4100 deletions
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 127408069ca7..631b9477b99c 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -932,7 +932,8 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
 static int erst_open_pstore(struct pstore_info *psi);
 static int erst_close_pstore(struct pstore_info *psi);
 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
-			   struct timespec *time, struct pstore_info *psi);
+			   struct timespec *time, char **buf,
+			   struct pstore_info *psi);
 static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
 		       size_t size, struct pstore_info *psi);
 static int erst_clearer(enum pstore_type_id type, u64 id,
@@ -986,17 +987,23 @@ static int erst_close_pstore(struct pstore_info *psi)
 }
 
 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
-			   struct timespec *time, struct pstore_info *psi)
+			   struct timespec *time, char **buf,
+			   struct pstore_info *psi)
 {
 	int rc;
 	ssize_t len = 0;
 	u64 record_id;
-	struct cper_pstore_record *rcd = (struct cper_pstore_record *)
-			(erst_info.buf - sizeof(*rcd));
+	struct cper_pstore_record *rcd;
+	size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;
 
 	if (erst_disable)
 		return -ENODEV;
 
+	rcd = kmalloc(rcd_len, GFP_KERNEL);
+	if (!rcd) {
+		rc = -ENOMEM;
+		goto out;
+	}
 skip:
 	rc = erst_get_record_id_next(&reader_pos, &record_id);
 	if (rc)
@@ -1004,22 +1011,27 @@ skip:
 
 	/* no more record */
 	if (record_id == APEI_ERST_INVALID_RECORD_ID) {
-		rc = -1;
+		rc = -EINVAL;
 		goto out;
 	}
 
-	len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) +
-			erst_info.bufsize);
+	len = erst_read(record_id, &rcd->hdr, rcd_len);
 	/* The record may be cleared by others, try read next record */
 	if (len == -ENOENT)
 		goto skip;
-	else if (len < 0) {
-		rc = -1;
+	else if (len < sizeof(*rcd)) {
+		rc = -EIO;
 		goto out;
 	}
 	if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
 		goto skip;
 
+	*buf = kmalloc(len, GFP_KERNEL);
+	if (*buf == NULL) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	memcpy(*buf, rcd->data, len - sizeof(*rcd));
 	*id = record_id;
 	if (uuid_le_cmp(rcd->sec_hdr.section_type,
 			CPER_SECTION_TYPE_DMESG) == 0)
@@ -1037,6 +1049,7 @@ skip:
 		time->tv_nsec = 0;
 
 out:
+	kfree(rcd);
 	return (rc < 0) ? rc : (len - sizeof(*rcd));
 }
 
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 73b2909dddfe..0e8e2de2ed3e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -224,7 +224,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 /*
  * Suspend / resume control
  */
-static int acpi_idle_suspend;
 static u32 saved_bm_rld;
 
 static void acpi_idle_bm_rld_save(void)
@@ -243,21 +242,13 @@ static void acpi_idle_bm_rld_restore(void)
 
 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
 {
-	if (acpi_idle_suspend == 1)
-		return 0;
-
 	acpi_idle_bm_rld_save();
-	acpi_idle_suspend = 1;
 	return 0;
 }
 
 int acpi_processor_resume(struct acpi_device * device)
 {
-	if (acpi_idle_suspend == 0)
-		return 0;
-
 	acpi_idle_bm_rld_restore();
-	acpi_idle_suspend = 0;
 	return 0;
 }
 
@@ -763,13 +754,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 
 	local_irq_disable();
 
-	/* Do not access any ACPI IO ports in suspend path */
-	if (acpi_idle_suspend) {
-		local_irq_enable();
-		cpu_relax();
-		return -EINVAL;
-	}
-
 	lapic_timer_state_broadcast(pr, cx, 1);
 	kt1 = ktime_get_real();
 	acpi_idle_do_entry(cx);
@@ -810,13 +794,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 
 	local_irq_disable();
 
-	if (acpi_idle_suspend) {
-		local_irq_enable();
-		cpu_relax();
-		return -EINVAL;
-	}
-
-
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
@@ -895,12 +872,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	if (unlikely(!pr))
 		return -EINVAL;
 
-
-	if (acpi_idle_suspend) {
-		cpu_relax();
-		return -EINVAL;
-	}
-
 	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
 		if (drv->safe_state_index >= 0) {
 			return drv->states[drv->safe_state_index].enter(dev,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index fb7b90b05922..cf26222a93c5 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -390,6 +390,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	/* Promise */
 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
 
+	/* Asmedia */
+	{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },	/* ASM1061 */
+
 	/* Generic, PCI class code for AHCI */
 	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 004f2ce3dc73..43b875810d1b 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -65,9 +65,9 @@ static struct scsi_host_template ahci_platform_sht = {
 static int __init ahci_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct ahci_platform_data *pdata = dev->platform_data;
+	struct ahci_platform_data *pdata = dev_get_platdata(dev);
 	const struct platform_device_id *id = platform_get_device_id(pdev);
-	struct ata_port_info pi = ahci_port_info[id->driver_data];
+	struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
 	const struct ata_port_info *ppi[] = { &pi, NULL };
 	struct ahci_host_priv *hpriv;
 	struct ata_host *host;
@@ -191,7 +191,7 @@ err0:
 static int __devexit ahci_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct ahci_platform_data *pdata = dev->platform_data;
+	struct ahci_platform_data *pdata = dev_get_platdata(dev);
 	struct ata_host *host = dev_get_drvdata(dev);
 
 	ata_host_detach(host);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index f22957c2769a..a9b282038000 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2883,7 +2883,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	    sata_scr_read(link, SCR_STATUS, &sstatus))
 		rc = -ERESTART;
 
-	if (rc == -ERESTART || try >= max_tries) {
+	if (try >= max_tries) {
 		/*
 		 * Thaw host port even if reset failed, so that the port
 		 * can be retried on the next phy event.  This risks
@@ -2909,6 +2909,16 @@ int ata_eh_reset(struct ata_link *link, int classify,
 		ata_eh_acquire(ap);
 	}
 
+	/*
+	 * While disks spinup behind PMP, some controllers fail sending SRST.
+	 * They need to be reset - as well as the PMP - before retrying.
+	 */
+	if (rc == -ERESTART) {
+		if (ata_is_host_link(link))
+			ata_eh_thaw_port(ap);
+		goto out;
+	}
+
 	if (try == max_tries - 1) {
 		sata_down_spd_limit(link, 0);
 		if (slave)
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 104462dbc524..21b80c555c60 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -389,12 +389,9 @@ static void sata_pmp_quirks(struct ata_port *ap)
 		/* link reports offline after LPM */
 		link->flags |= ATA_LFLAG_NO_LPM;
 
-		/* Class code report is unreliable and SRST
-		 * times out under certain configurations.
-		 */
+		/* Class code report is unreliable. */
 		if (link->pmp < 5)
-			link->flags |= ATA_LFLAG_NO_SRST |
-				       ATA_LFLAG_ASSUME_ATA;
+			link->flags |= ATA_LFLAG_ASSUME_ATA;
 
 		/* port 5 is for SEMB device and it doesn't like SRST */
 		if (link->pmp == 5)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 72a9770ac42f..2a5412e7e9c1 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1217,6 +1217,10 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
 
 /**
  *	__ata_change_queue_depth - helper for ata_scsi_change_queue_depth
+ *	@ap: ATA port to which the device change the queue depth
+ *	@sdev: SCSI device to configure queue depth for
+ *	@queue_depth: new queue depth
+ *	@reason: calling context
  *
  *	libsas and libata have different approaches for associating a sdev to
  *	its ata_port.
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 63d53277d6a9..4cadfa28f940 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2533,10 +2533,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
 	if (rc)
 		goto out;
 
+#ifdef CONFIG_ATA_BMDMA
 	if (bmdma)
 		/* prepare and activate BMDMA host */
 		rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
 	else
+#endif
 		/* prepare and activate SFF host */
 		rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 	if (rc)
@@ -2544,10 +2546,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
 	host->private_data = host_priv;
 	host->flags |= hflags;
 
+#ifdef CONFIG_ATA_BMDMA
 	if (bmdma) {
 		pci_set_master(pdev);
 		rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
 	} else
+#endif
 		rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
 out:
 	if (rc == 0)
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index a72ab0dde4e5..2a472c5bb7db 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -52,7 +52,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
 	}
 
 	ret = of_irq_to_resource(dn, 0, &irq_res);
-	if (ret == NO_IRQ)
+	if (!ret)
 		irq_res.start = irq_res.end = 0;
 	else
 		irq_res.flags = 0;
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 447d9c05fb5a..95ec435f0eb4 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -104,7 +104,7 @@ static const struct ata_port_info sis_port_info = {
 };
 
 MODULE_AUTHOR("Uwe Koziolek");
-MODULE_DESCRIPTION("low-level driver for Silicon Integratad Systems SATA controller");
+MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 82c865452c70..919daa7cd5b1 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -22,6 +22,7 @@
 #include <linux/kallsyms.h>
 #include <linux/mutex.h>
 #include <linux/async.h>
+#include <linux/pm_runtime.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -1743,6 +1744,10 @@ void device_shutdown(void)
 		list_del_init(&dev->kobj.entry);
 		spin_unlock(&devices_kset->list_lock);
 
+		/* Don't allow any more runtime suspends */
+		pm_runtime_get_noresume(dev);
+		pm_runtime_barrier(dev);
+
 		if (dev->bus && dev->bus->shutdown) {
 			dev_dbg(dev, "shutdown\n");
 			dev->bus->shutdown(dev);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 793f796c4da3..5693ecee9a40 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -127,12 +127,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
 		       nid, K(node_page_state(nid, NR_WRITEBACK)),
 		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
-		       nid, K(node_page_state(nid, NR_ANON_PAGES)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		       nid, K(node_page_state(nid, NR_ANON_PAGES)
 			+ node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
-			HPAGE_PMD_NR
+			HPAGE_PMD_NR),
+#else
+		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
 #endif
-		       ),
 		       nid, K(node_page_state(nid, NR_SHMEM)),
 		       nid, node_page_state(nid, NR_KERNEL_STACK) *
 				THREAD_SIZE / 1024,
@@ -143,13 +144,14 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
 		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
 				node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
 		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
-		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
 			, nid,
 			K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
-			HPAGE_PMD_NR)
+			HPAGE_PMD_NR));
+#else
+		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
 #endif
-		       );
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;
 }
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 5f0f85d5c576..428e55e012dc 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -229,7 +229,8 @@ int pm_clk_suspend(struct device *dev)
 
 	list_for_each_entry_reverse(ce, &psd->clock_list, node) {
 		if (ce->status < PCE_STATUS_ERROR) {
-			clk_disable(ce->clk);
+			if (ce->status == PCE_STATUS_ENABLED)
+				clk_disable(ce->clk);
 			ce->status = PCE_STATUS_ACQUIRED;
 		}
 	}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 7fa098464dae..c3d2dfcf438d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -920,7 +920,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
  End:
 	if (!error) {
 		dev->power.is_suspended = true;
-		if (dev->power.wakeup_path && dev->parent)
+		if (dev->power.wakeup_path
+		    && dev->parent && !dev->parent->power.ignore_children)
 			dev->parent->power.wakeup_path = true;
 	}
 
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 434a6c011675..95706fa24c73 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -669,7 +669,7 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
 	struct device_opp *dev_opp = find_device_opp(dev);
 
 	if (IS_ERR(dev_opp))
-		return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */
+		return ERR_CAST(dev_opp); /* matching type */
 
 	return &dev_opp->head;
 }
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 30a94eadc200..86de6c50fc41 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -212,11 +212,9 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 	if (!dev || !req) /*guard against callers passing in null */
 		return -EINVAL;
 
-	if (dev_pm_qos_request_active(req)) {
-		WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already "
-			"added request\n");
+	if (WARN(dev_pm_qos_request_active(req),
+		 "%s() called for already added request\n", __func__))
 		return -EINVAL;
-	}
 
 	req->dev = dev;
 
@@ -271,11 +269,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 	if (!req) /*guard against callers passing in null */
 		return -EINVAL;
 
-	if (!dev_pm_qos_request_active(req)) {
-		WARN(1, KERN_ERR "dev_pm_qos_update_request() called for "
-		     "unknown object\n");
+	if (WARN(!dev_pm_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
 		return -EINVAL;
-	}
 
 	mutex_lock(&dev_pm_qos_mtx);
 
@@ -312,11 +308,9 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
 	if (!req) /*guard against callers passing in null */
 		return -EINVAL;
 
-	if (!dev_pm_qos_request_active(req)) {
-		WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for "
-		     "unknown object\n");
+	if (WARN(!dev_pm_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
 		return -EINVAL;
-	}
 
 	mutex_lock(&dev_pm_qos_mtx);
 
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 486f94ef24d4..587cce57adae 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -2600,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
 		c->Request.Timeout = 0;
 		c->Request.CDB[0] = BMIC_WRITE;
 		c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+		c->Request.CDB[7] = (size >> 8) & 0xFF;
+		c->Request.CDB[8] = size & 0xFF;
 		break;
 	case TEST_UNIT_READY:
 		c->Request.CDBLen = 6;
@@ -4319,6 +4322,10 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
 		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
 		return -ENODEV;
 	}
+
+	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
+				PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
+
 	err = pci_enable_device(h->pdev);
 	if (err) {
 		dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
@@ -4875,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
 {
 	if (h->msix_vector || h->msi_vector) {
 		if (!request_irq(h->intr[h->intr_mode], msixhandler,
-				IRQF_DISABLED, h->devname, h))
+				0, h->devname, h))
 			return 0;
 		dev_err(&h->pdev->dev, "Unable to get msi irq %d"
 			" for %s\n", h->intr[h->intr_mode],
@@ -4884,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
 	}
 
 	if (!request_irq(h->intr[h->intr_mode], intxhandler,
-			IRQF_DISABLED, h->devname, h))
+			IRQF_SHARED, h->devname, h))
 		return 0;
 	dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
 		h->intr[h->intr_mode], h->devname);
@@ -5158,6 +5165,7 @@ reinit_after_soft_reset:
 	h->cciss_max_sectors = 8192;
 
 	rebuild_lun_table(h, 1, 0);
+	cciss_engage_scsi(h);
 	h->busy_initializing = 0;
 	return 1;
 
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 951a4e33b92b..e820b68d2f6c 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -1720,5 +1720,6 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
 /* If no tape support, then these become defined out of existence */
 
 #define cciss_scsi_setup(cntl_num)
+#define cciss_engage_scsi(h)
 
 #endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 3d806820280e..1e888c9e85b3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -161,17 +161,19 @@ static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
 	&xor_funcs
 };
 
-static loff_t get_loop_size(struct loop_device *lo, struct file *file)
+static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
 {
-	loff_t size, offset, loopsize;
+	loff_t size, loopsize;
 
 	/* Compute loopsize in bytes */
 	size = i_size_read(file->f_mapping->host);
-	offset = lo->lo_offset;
 	loopsize = size - offset;
-	if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
-		loopsize = lo->lo_sizelimit;
+	/* offset is beyond i_size, wierd but possible */
+	if (loopsize < 0)
+		return 0;
 
+	if (sizelimit > 0 && sizelimit < loopsize)
+		loopsize = sizelimit;
 	/*
 	 * Unfortunately, if we want to do I/O on the device,
 	 * the number of 512-byte sectors has to fit into a sector_t.
@@ -179,17 +181,25 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
 	return loopsize >> 9;
 }
 
+static loff_t get_loop_size(struct loop_device *lo, struct file *file)
+{
+	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
+}
+
 static int
-figure_loop_size(struct loop_device *lo)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
 {
-	loff_t size = get_loop_size(lo, lo->lo_backing_file);
+	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
 	sector_t x = (sector_t)size;
 
 	if (unlikely((loff_t)x != size))
 		return -EFBIG;
-
+	if (lo->lo_offset != offset)
+		lo->lo_offset = offset;
+	if (lo->lo_sizelimit != sizelimit)
+		lo->lo_sizelimit = sizelimit;
 	set_capacity(lo->lo_disk, x);
 	return 0;
 }
 
 static inline int
@@ -372,7 +382,8 @@ do_lo_receive(struct loop_device *lo,
 
 	if (retval < 0)
 		return retval;
-
+	if (retval != bvec->bv_len)
+		return -EIO;
 	return 0;
 }
 
@@ -411,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 
 	/*
 	 * We use punch hole to reclaim the free space used by the
-	 * image a.k.a. discard. However we do support discard if
+	 * image a.k.a. discard. However we do not support discard if
 	 * encryption is enabled, because it may give an attacker
 	 * useful information.
 	 */
@@ -786,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
 	}
 
 	q->limits.discard_granularity = inode->i_sb->s_blocksize;
-	q->limits.discard_alignment = inode->i_sb->s_blocksize;
+	q->limits.discard_alignment = 0;
 	q->limits.max_discard_sectors = UINT_MAX >> 9;
 	q->limits.discard_zeroes_data = 1;
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
@@ -1058,9 +1069,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 
 	if (lo->lo_offset != info->lo_offset ||
 	    lo->lo_sizelimit != info->lo_sizelimit) {
-		lo->lo_offset = info->lo_offset;
-		lo->lo_sizelimit = info->lo_sizelimit;
-		if (figure_loop_size(lo))
+		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
 			return -EFBIG;
 	}
 	loop_config_discard(lo);
@@ -1246,7 +1255,7 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
 	err = -ENXIO;
 	if (unlikely(lo->lo_state != Lo_bound))
 		goto out;
-	err = figure_loop_size(lo);
+	err = figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
 	if (unlikely(err))
 		goto out;
 	sec = get_capacity(lo->lo_disk);
@@ -1284,13 +1293,19 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
 		goto out_unlocked;
 		break;
 	case LOOP_SET_STATUS:
-		err = loop_set_status_old(lo, (struct loop_info __user *) arg);
+		err = -EPERM;
+		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+			err = loop_set_status_old(lo,
+					(struct loop_info __user *)arg);
 		break;
 	case LOOP_GET_STATUS:
 		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
 		break;
 	case LOOP_SET_STATUS64:
-		err = loop_set_status64(lo, (struct loop_info64 __user *) arg);
+		err = -EPERM;
+		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+			err = loop_set_status64(lo,
+					(struct loop_info64 __user *) arg);
 		break;
 	case LOOP_GET_STATUS64:
 		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 6b9a2000d56a..a79fb4f7ff62 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -630,6 +630,7 @@ static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t
 	if (dev->status & 0x10)
 		return -ETIME;
 
+	memset(&hdr, 0, sizeof(hdr));
 	hdr.magic = PG_MAGIC;
 	hdr.dlen = dev->dlen;
 	copy = 0;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 65cc424359b0..148ab944378d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list); /* clients */
 
 static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
 static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf,
-				 size_t size);
 static ssize_t rbd_snap_add(struct device *dev,
 			    struct device_attribute *attr,
 			    const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
 	u32 snap_count = le32_to_cpu(ondisk->snap_count);
 	int ret = -ENOMEM;
 
+	if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+		return -ENXIO;
+	}
+
 	init_rwsem(&header->snap_rwsem);
 	header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
 	header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1356,32 +1356,6 @@ fail:
 }
 
 /*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
-				     u64 snapid,
-				     const char *obj)
-{
-	struct ceph_osd_req_op *ops;
-	int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
-	if (ret < 0)
-		return ret;
-
-	ops[0].snap.snapid = snapid;
-
-	ret = rbd_req_sync_op(dev, NULL,
-			       CEPH_NOSNAP,
-			       0,
-			       CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
-			       ops,
-			       1, obj, 0, 0, NULL, NULL, NULL);
-
-	rbd_destroy_ops(ops);
-
-	return ret;
-}
-
-/*
  * Request sync osd read
  */
 static int rbd_req_sync_exec(struct rbd_device *dev,
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
 		goto out_dh;
 
 	rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
-	if (rc < 0)
+	if (rc < 0) {
+		if (rc == -ENXIO) {
+			pr_warning("unrecognized header format"
+				   " for image %s", rbd_dev->obj);
+		}
 		goto out_dh;
+	}
 
 	if (snap_count != header->total_snaps) {
 		snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
 static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
 
 static struct attribute *rbd_attrs[] = {
 	&dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
 	&dev_attr_current_snap.attr,
 	&dev_attr_refresh.attr,
 	&dev_attr_create_snap.attr,
-	&dev_attr_rollback_snap.attr,
 	NULL
 };
 
@@ -2424,64 +2401,6 @@ err_unlock:
2424 return ret; 2401 return ret;
2425} 2402}
2426 2403
2427static ssize_t rbd_snap_rollback(struct device *dev,
2428 struct device_attribute *attr,
2429 const char *buf,
2430 size_t count)
2431{
2432 struct rbd_device *rbd_dev = dev_to_rbd(dev);
2433 int ret;
2434 u64 snapid;
2435 u64 cur_ofs;
2436 char *seg_name = NULL;
2437 char *snap_name = kmalloc(count + 1, GFP_KERNEL);
2438 ret = -ENOMEM;
2439 if (!snap_name)
2440 return ret;
2441
2442 /* parse snaps add command */
2443 snprintf(snap_name, count, "%s", buf);
2444 seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
2445 if (!seg_name)
2446 goto done;
2447
2448 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2449
2450 ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
2451 if (ret < 0)
2452 goto done_unlock;
2453
2454 dout("snapid=%lld\n", snapid);
2455
2456 cur_ofs = 0;
2457 while (cur_ofs < rbd_dev->header.image_size) {
2458 cur_ofs += rbd_get_segment(&rbd_dev->header,
2459 rbd_dev->obj,
2460 cur_ofs, (u64)-1,
2461 seg_name, NULL);
2462 dout("seg_name=%s\n", seg_name);
2463
2464 ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
2465 if (ret < 0)
2466 pr_warning("could not roll back obj %s err=%d\n",
2467 seg_name, ret);
2468 }
2469
2470 ret = __rbd_update_snaps(rbd_dev);
2471 if (ret < 0)
2472 goto done_unlock;
2473
2474 ret = count;
2475
2476done_unlock:
2477 mutex_unlock(&ctl_mutex);
2478done:
2479 kfree(seg_name);
2480 kfree(snap_name);
2481
2482 return ret;
2483}
2484
2485static struct bus_attribute rbd_bus_attrs[] = { 2404static struct bus_attribute rbd_bus_attrs[] = {
2486 __ATTR(add, S_IWUSR, NULL, rbd_add), 2405 __ATTR(add, S_IWUSR, NULL, rbd_add),
2487 __ATTR(remove, S_IWUSR, NULL, rbd_remove), 2406 __ATTR(remove, S_IWUSR, NULL, rbd_remove),
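
Aside from dropping snapshot rollback, the rbd hunks above make rbd_header_from_disk() reject an on-disk header whose magic text does not match before any other field is parsed, and let rbd_read_header() translate the resulting -ENXIO into a warning. As an illustration outside the patch, a minimal userspace sketch of that validate-the-magic-first pattern, with invented names, looks like this:

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    #define IMG_HEADER_TEXT "<<< Example image v1 >>>"   /* invented magic string */

    struct img_ondisk {
        char     text[40];       /* must begin with IMG_HEADER_TEXT */
        uint64_t image_size;
        uint32_t snap_count;
    };

    /* Returns 0 on success, -ENXIO if the blob is not in the expected format. */
    static int img_header_check(const struct img_ondisk *ondisk)
    {
        if (memcmp(ondisk->text, IMG_HEADER_TEXT, sizeof(IMG_HEADER_TEXT)))
            return -ENXIO;       /* unrecognized format: refuse to parse further */
        return 0;
    }

Checking the signature before reading any other field keeps a mis-addressed or corrupted object from being interpreted as a valid header.
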
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ae3e167e17ad..89ddab127e33 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -16,6 +16,8 @@
16 * handle GCR disks 16 * handle GCR disks
17 */ 17 */
18 18
19#undef DEBUG
20
19#include <linux/stddef.h> 21#include <linux/stddef.h>
20#include <linux/kernel.h> 22#include <linux/kernel.h>
21#include <linux/sched.h> 23#include <linux/sched.h>
@@ -36,13 +38,11 @@
36#include <asm/machdep.h> 38#include <asm/machdep.h>
37#include <asm/pmac_feature.h> 39#include <asm/pmac_feature.h>
38 40
39static DEFINE_MUTEX(swim3_mutex);
40static struct request_queue *swim3_queue;
41static struct gendisk *disks[2];
42static struct request *fd_req;
43
44#define MAX_FLOPPIES 2 41#define MAX_FLOPPIES 2
45 42
43static DEFINE_MUTEX(swim3_mutex);
44static struct gendisk *disks[MAX_FLOPPIES];
45
46enum swim_state { 46enum swim_state {
47 idle, 47 idle,
48 locating, 48 locating,
@@ -177,7 +177,6 @@ struct swim3 {
177 177
178struct floppy_state { 178struct floppy_state {
179 enum swim_state state; 179 enum swim_state state;
180 spinlock_t lock;
181 struct swim3 __iomem *swim3; /* hardware registers */ 180 struct swim3 __iomem *swim3; /* hardware registers */
182 struct dbdma_regs __iomem *dma; /* DMA controller registers */ 181 struct dbdma_regs __iomem *dma; /* DMA controller registers */
183 int swim3_intr; /* interrupt number for SWIM3 */ 182 int swim3_intr; /* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
204 int wanted; 203 int wanted;
205 struct macio_dev *mdev; 204 struct macio_dev *mdev;
206 char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)]; 205 char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
206 int index;
207 struct request *cur_req;
207}; 208};
208 209
210#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
211#define swim3_warn(fmt, arg...) dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
212#define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
213
214#ifdef DEBUG
215#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
216#else
217#define swim3_dbg(fmt, arg...) do { } while(0)
218#endif
219
209static struct floppy_state floppy_states[MAX_FLOPPIES]; 220static struct floppy_state floppy_states[MAX_FLOPPIES];
210static int floppy_count = 0; 221static int floppy_count = 0;
211static DEFINE_SPINLOCK(swim3_lock); 222static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
224 0, 0, 0, 0, 0, 0 235 0, 0, 0, 0, 0, 0
225}; 236};
226 237
227static void swim3_select(struct floppy_state *fs, int sel);
228static void swim3_action(struct floppy_state *fs, int action);
229static int swim3_readbit(struct floppy_state *fs, int bit);
230static void do_fd_request(struct request_queue * q);
231static void start_request(struct floppy_state *fs);
232static void set_timeout(struct floppy_state *fs, int nticks,
233 void (*proc)(unsigned long));
234static void scan_track(struct floppy_state *fs);
235static void seek_track(struct floppy_state *fs, int n); 238static void seek_track(struct floppy_state *fs, int n);
236static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count); 239static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
237static void setup_transfer(struct floppy_state *fs);
238static void act(struct floppy_state *fs); 240static void act(struct floppy_state *fs);
239static void scan_timeout(unsigned long data); 241static void scan_timeout(unsigned long data);
240static void seek_timeout(unsigned long data); 242static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
254 unsigned int clearing); 256 unsigned int clearing);
255static int floppy_revalidate(struct gendisk *disk); 257static int floppy_revalidate(struct gendisk *disk);
256 258
257static bool swim3_end_request(int err, unsigned int nr_bytes) 259static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
258{ 260{
259 if (__blk_end_request(fd_req, err, nr_bytes)) 261 struct request *req = fs->cur_req;
260 return true; 262 int rc;
261 263
262 fd_req = NULL; 264 swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
263 return false; 265 err, nr_bytes, req);
264}
265 266
266static bool swim3_end_request_cur(int err) 267 if (err)
267{ 268 nr_bytes = blk_rq_cur_bytes(req);
268 return swim3_end_request(err, blk_rq_cur_bytes(fd_req)); 269 rc = __blk_end_request(req, err, nr_bytes);
270 if (rc)
271 return true;
272 fs->cur_req = NULL;
273 return false;
269} 274}
270 275
271static void swim3_select(struct floppy_state *fs, int sel) 276static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
303 return (stat & DATA) == 0; 308 return (stat & DATA) == 0;
304} 309}
305 310
306static void do_fd_request(struct request_queue * q)
307{
308 int i;
309
310 for(i=0; i<floppy_count; i++) {
311 struct floppy_state *fs = &floppy_states[i];
312 if (fs->mdev->media_bay &&
313 check_media_bay(fs->mdev->media_bay) != MB_FD)
314 continue;
315 start_request(fs);
316 }
317}
318
319static void start_request(struct floppy_state *fs) 311static void start_request(struct floppy_state *fs)
320{ 312{
321 struct request *req; 313 struct request *req;
322 unsigned long x; 314 unsigned long x;
323 315
316 swim3_dbg("start request, initial state=%d\n", fs->state);
317
324 if (fs->state == idle && fs->wanted) { 318 if (fs->state == idle && fs->wanted) {
325 fs->state = available; 319 fs->state = available;
326 wake_up(&fs->wait); 320 wake_up(&fs->wait);
327 return; 321 return;
328 } 322 }
329 while (fs->state == idle) { 323 while (fs->state == idle) {
330 if (!fd_req) { 324 swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
331 fd_req = blk_fetch_request(swim3_queue); 325 if (!fs->cur_req) {
332 if (!fd_req) 326 fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
327 swim3_dbg(" fetched request %p\n", fs->cur_req);
328 if (!fs->cur_req)
333 break; 329 break;
334 } 330 }
335 req = fd_req; 331 req = fs->cur_req;
336#if 0 332
337 printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n", 333 if (fs->mdev->media_bay &&
338 req->rq_disk->disk_name, req->cmd, 334 check_media_bay(fs->mdev->media_bay) != MB_FD) {
339 (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer); 335 swim3_dbg("%s", " media bay absent, dropping req\n");
340 printk(" errors=%d current_nr_sectors=%u\n", 336 swim3_end_request(fs, -ENODEV, 0);
341 req->errors, blk_rq_cur_sectors(req)); 337 continue;
338 }
339
340#if 0 /* This is really too verbose */
341 swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
342 req->rq_disk->disk_name, req->cmd,
343 (long)blk_rq_pos(req), blk_rq_sectors(req),
344 req->buffer);
345 swim3_dbg(" errors=%d current_nr_sectors=%u\n",
346 req->errors, blk_rq_cur_sectors(req));
342#endif 347#endif
343 348
344 if (blk_rq_pos(req) >= fs->total_secs) { 349 if (blk_rq_pos(req) >= fs->total_secs) {
345 swim3_end_request_cur(-EIO); 350 swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
351 (long)blk_rq_pos(req), (long)fs->total_secs);
352 swim3_end_request(fs, -EIO, 0);
346 continue; 353 continue;
347 } 354 }
348 if (fs->ejected) { 355 if (fs->ejected) {
349 swim3_end_request_cur(-EIO); 356 swim3_dbg("%s", " disk ejected\n");
357 swim3_end_request(fs, -EIO, 0);
350 continue; 358 continue;
351 } 359 }
352 360
@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
354 if (fs->write_prot < 0) 362 if (fs->write_prot < 0)
355 fs->write_prot = swim3_readbit(fs, WRITE_PROT); 363 fs->write_prot = swim3_readbit(fs, WRITE_PROT);
356 if (fs->write_prot) { 364 if (fs->write_prot) {
357 swim3_end_request_cur(-EIO); 365 swim3_dbg("%s", " try to write, disk write protected\n");
366 swim3_end_request(fs, -EIO, 0);
358 continue; 367 continue;
359 } 368 }
360 } 369 }
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
369 x = ((long)blk_rq_pos(req)) % fs->secpercyl; 378 x = ((long)blk_rq_pos(req)) % fs->secpercyl;
370 fs->head = x / fs->secpertrack; 379 fs->head = x / fs->secpertrack;
371 fs->req_sector = x % fs->secpertrack + 1; 380 fs->req_sector = x % fs->secpertrack + 1;
372 fd_req = req;
373 fs->state = do_transfer; 381 fs->state = do_transfer;
374 fs->retries = 0; 382 fs->retries = 0;
375 383
@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
377 } 385 }
378} 386}
379 387
388static void do_fd_request(struct request_queue * q)
389{
390 start_request(q->queuedata);
391}
392
380static void set_timeout(struct floppy_state *fs, int nticks, 393static void set_timeout(struct floppy_state *fs, int nticks,
381 void (*proc)(unsigned long)) 394 void (*proc)(unsigned long))
382{ 395{
383 unsigned long flags;
384
385 spin_lock_irqsave(&fs->lock, flags);
386 if (fs->timeout_pending) 396 if (fs->timeout_pending)
387 del_timer(&fs->timeout); 397 del_timer(&fs->timeout);
388 fs->timeout.expires = jiffies + nticks; 398 fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
390 fs->timeout.data = (unsigned long) fs; 400 fs->timeout.data = (unsigned long) fs;
391 add_timer(&fs->timeout); 401 add_timer(&fs->timeout);
392 fs->timeout_pending = 1; 402 fs->timeout_pending = 1;
393 spin_unlock_irqrestore(&fs->lock, flags);
394} 403}
395 404
396static inline void scan_track(struct floppy_state *fs) 405static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
442 struct swim3 __iomem *sw = fs->swim3; 451 struct swim3 __iomem *sw = fs->swim3;
443 struct dbdma_cmd *cp = fs->dma_cmd; 452 struct dbdma_cmd *cp = fs->dma_cmd;
444 struct dbdma_regs __iomem *dr = fs->dma; 453 struct dbdma_regs __iomem *dr = fs->dma;
454 struct request *req = fs->cur_req;
445 455
446 if (blk_rq_cur_sectors(fd_req) <= 0) { 456 if (blk_rq_cur_sectors(req) <= 0) {
447 printk(KERN_ERR "swim3: transfer 0 sectors?\n"); 457 swim3_warn("%s", "Transfer 0 sectors ?\n");
448 return; 458 return;
449 } 459 }
450 if (rq_data_dir(fd_req) == WRITE) 460 if (rq_data_dir(req) == WRITE)
451 n = 1; 461 n = 1;
452 else { 462 else {
453 n = fs->secpertrack - fs->req_sector + 1; 463 n = fs->secpertrack - fs->req_sector + 1;
454 if (n > blk_rq_cur_sectors(fd_req)) 464 if (n > blk_rq_cur_sectors(req))
455 n = blk_rq_cur_sectors(fd_req); 465 n = blk_rq_cur_sectors(req);
456 } 466 }
467
468 swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
469 fs->req_sector, fs->secpertrack, fs->head, n);
470
457 fs->scount = n; 471 fs->scount = n;
458 swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0); 472 swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
459 out_8(&sw->sector, fs->req_sector); 473 out_8(&sw->sector, fs->req_sector);
460 out_8(&sw->nsect, n); 474 out_8(&sw->nsect, n);
461 out_8(&sw->gap3, 0); 475 out_8(&sw->gap3, 0);
462 out_le32(&dr->cmdptr, virt_to_bus(cp)); 476 out_le32(&dr->cmdptr, virt_to_bus(cp));
463 if (rq_data_dir(fd_req) == WRITE) { 477 if (rq_data_dir(req) == WRITE) {
464 /* Set up 3 dma commands: write preamble, data, postamble */ 478 /* Set up 3 dma commands: write preamble, data, postamble */
465 init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble)); 479 init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
466 ++cp; 480 ++cp;
467 init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512); 481 init_dma(cp, OUTPUT_MORE, req->buffer, 512);
468 ++cp; 482 ++cp;
469 init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble)); 483 init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
470 } else { 484 } else {
471 init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512); 485 init_dma(cp, INPUT_LAST, req->buffer, n * 512);
472 } 486 }
473 ++cp; 487 ++cp;
474 out_le16(&cp->command, DBDMA_STOP); 488 out_le16(&cp->command, DBDMA_STOP);
475 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); 489 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
476 in_8(&sw->error); 490 in_8(&sw->error);
477 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); 491 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
478 if (rq_data_dir(fd_req) == WRITE) 492 if (rq_data_dir(req) == WRITE)
479 out_8(&sw->control_bis, WRITE_SECTORS); 493 out_8(&sw->control_bis, WRITE_SECTORS);
480 in_8(&sw->intr); 494 in_8(&sw->intr);
481 out_le32(&dr->control, (RUN << 16) | RUN); 495 out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
488static void act(struct floppy_state *fs) 502static void act(struct floppy_state *fs)
489{ 503{
490 for (;;) { 504 for (;;) {
505 swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
506 fs->state, fs->req_cyl, fs->cur_cyl);
507
491 switch (fs->state) { 508 switch (fs->state) {
492 case idle: 509 case idle:
493 return; /* XXX shouldn't get here */ 510 return; /* XXX shouldn't get here */
494 511
495 case locating: 512 case locating:
496 if (swim3_readbit(fs, TRACK_ZERO)) { 513 if (swim3_readbit(fs, TRACK_ZERO)) {
514 swim3_dbg("%s", " locate track 0\n");
497 fs->cur_cyl = 0; 515 fs->cur_cyl = 0;
498 if (fs->req_cyl == 0) 516 if (fs->req_cyl == 0)
499 fs->state = do_transfer; 517 fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
511 break; 529 break;
512 } 530 }
513 if (fs->req_cyl == fs->cur_cyl) { 531 if (fs->req_cyl == fs->cur_cyl) {
514 printk("whoops, seeking 0\n"); 532 swim3_warn("%s", "Whoops, seeking 0\n");
515 fs->state = do_transfer; 533 fs->state = do_transfer;
516 break; 534 break;
517 } 535 }
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
527 case do_transfer: 545 case do_transfer:
528 if (fs->cur_cyl != fs->req_cyl) { 546 if (fs->cur_cyl != fs->req_cyl) {
529 if (fs->retries > 5) { 547 if (fs->retries > 5) {
530 swim3_end_request_cur(-EIO); 548 swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
549 fs->req_cyl, fs->cur_cyl);
550 swim3_end_request(fs, -EIO, 0);
531 fs->state = idle; 551 fs->state = idle;
532 return; 552 return;
533 } 553 }
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
542 return; 562 return;
543 563
544 default: 564 default:
545 printk(KERN_ERR"swim3: unknown state %d\n", fs->state); 565 swim3_err("Unknown state %d\n", fs->state);
546 return; 566 return;
547 } 567 }
548 } 568 }
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
552{ 572{
553 struct floppy_state *fs = (struct floppy_state *) data; 573 struct floppy_state *fs = (struct floppy_state *) data;
554 struct swim3 __iomem *sw = fs->swim3; 574 struct swim3 __iomem *sw = fs->swim3;
575 unsigned long flags;
576
577 swim3_dbg("* scan timeout, state=%d\n", fs->state);
555 578
579 spin_lock_irqsave(&swim3_lock, flags);
556 fs->timeout_pending = 0; 580 fs->timeout_pending = 0;
557 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); 581 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
558 out_8(&sw->select, RELAX); 582 out_8(&sw->select, RELAX);
559 out_8(&sw->intr_enable, 0); 583 out_8(&sw->intr_enable, 0);
560 fs->cur_cyl = -1; 584 fs->cur_cyl = -1;
561 if (fs->retries > 5) { 585 if (fs->retries > 5) {
562 swim3_end_request_cur(-EIO); 586 swim3_end_request(fs, -EIO, 0);
563 fs->state = idle; 587 fs->state = idle;
564 start_request(fs); 588 start_request(fs);
565 } else { 589 } else {
566 fs->state = jogging; 590 fs->state = jogging;
567 act(fs); 591 act(fs);
568 } 592 }
593 spin_unlock_irqrestore(&swim3_lock, flags);
569} 594}
570 595
571static void seek_timeout(unsigned long data) 596static void seek_timeout(unsigned long data)
572{ 597{
573 struct floppy_state *fs = (struct floppy_state *) data; 598 struct floppy_state *fs = (struct floppy_state *) data;
574 struct swim3 __iomem *sw = fs->swim3; 599 struct swim3 __iomem *sw = fs->swim3;
600 unsigned long flags;
601
602 swim3_dbg("* seek timeout, state=%d\n", fs->state);
575 603
604 spin_lock_irqsave(&swim3_lock, flags);
576 fs->timeout_pending = 0; 605 fs->timeout_pending = 0;
577 out_8(&sw->control_bic, DO_SEEK); 606 out_8(&sw->control_bic, DO_SEEK);
578 out_8(&sw->select, RELAX); 607 out_8(&sw->select, RELAX);
579 out_8(&sw->intr_enable, 0); 608 out_8(&sw->intr_enable, 0);
580 printk(KERN_ERR "swim3: seek timeout\n"); 609 swim3_err("%s", "Seek timeout\n");
581 swim3_end_request_cur(-EIO); 610 swim3_end_request(fs, -EIO, 0);
582 fs->state = idle; 611 fs->state = idle;
583 start_request(fs); 612 start_request(fs);
613 spin_unlock_irqrestore(&swim3_lock, flags);
584} 614}
585 615
586static void settle_timeout(unsigned long data) 616static void settle_timeout(unsigned long data)
587{ 617{
588 struct floppy_state *fs = (struct floppy_state *) data; 618 struct floppy_state *fs = (struct floppy_state *) data;
589 struct swim3 __iomem *sw = fs->swim3; 619 struct swim3 __iomem *sw = fs->swim3;
620 unsigned long flags;
621
622 swim3_dbg("* settle timeout, state=%d\n", fs->state);
590 623
624 spin_lock_irqsave(&swim3_lock, flags);
591 fs->timeout_pending = 0; 625 fs->timeout_pending = 0;
592 if (swim3_readbit(fs, SEEK_COMPLETE)) { 626 if (swim3_readbit(fs, SEEK_COMPLETE)) {
593 out_8(&sw->select, RELAX); 627 out_8(&sw->select, RELAX);
594 fs->state = locating; 628 fs->state = locating;
595 act(fs); 629 act(fs);
596 return; 630 goto unlock;
597 } 631 }
598 out_8(&sw->select, RELAX); 632 out_8(&sw->select, RELAX);
599 if (fs->settle_time < 2*HZ) { 633 if (fs->settle_time < 2*HZ) {
600 ++fs->settle_time; 634 ++fs->settle_time;
601 set_timeout(fs, 1, settle_timeout); 635 set_timeout(fs, 1, settle_timeout);
602 return; 636 goto unlock;
603 } 637 }
604 printk(KERN_ERR "swim3: seek settle timeout\n"); 638 swim3_err("%s", "Seek settle timeout\n");
605 swim3_end_request_cur(-EIO); 639 swim3_end_request(fs, -EIO, 0);
606 fs->state = idle; 640 fs->state = idle;
607 start_request(fs); 641 start_request(fs);
642 unlock:
643 spin_unlock_irqrestore(&swim3_lock, flags);
608} 644}
609 645
610static void xfer_timeout(unsigned long data) 646static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
612 struct floppy_state *fs = (struct floppy_state *) data; 648 struct floppy_state *fs = (struct floppy_state *) data;
613 struct swim3 __iomem *sw = fs->swim3; 649 struct swim3 __iomem *sw = fs->swim3;
614 struct dbdma_regs __iomem *dr = fs->dma; 650 struct dbdma_regs __iomem *dr = fs->dma;
651 unsigned long flags;
615 int n; 652 int n;
616 653
654 swim3_dbg("* xfer timeout, state=%d\n", fs->state);
655
656 spin_lock_irqsave(&swim3_lock, flags);
617 fs->timeout_pending = 0; 657 fs->timeout_pending = 0;
618 out_le32(&dr->control, RUN << 16); 658 out_le32(&dr->control, RUN << 16);
619 /* We must wait a bit for dbdma to stop */ 659 /* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
622 out_8(&sw->intr_enable, 0); 662 out_8(&sw->intr_enable, 0);
623 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); 663 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
624 out_8(&sw->select, RELAX); 664 out_8(&sw->select, RELAX);
625 printk(KERN_ERR "swim3: timeout %sing sector %ld\n", 665 swim3_err("Timeout %sing sector %ld\n",
626 (rq_data_dir(fd_req)==WRITE? "writ": "read"), 666 (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
627 (long)blk_rq_pos(fd_req)); 667 (long)blk_rq_pos(fs->cur_req));
628 swim3_end_request_cur(-EIO); 668 swim3_end_request(fs, -EIO, 0);
629 fs->state = idle; 669 fs->state = idle;
630 start_request(fs); 670 start_request(fs);
671 spin_unlock_irqrestore(&swim3_lock, flags);
631} 672}
632 673
633static irqreturn_t swim3_interrupt(int irq, void *dev_id) 674static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
638 int stat, resid; 679 int stat, resid;
639 struct dbdma_regs __iomem *dr; 680 struct dbdma_regs __iomem *dr;
640 struct dbdma_cmd *cp; 681 struct dbdma_cmd *cp;
682 unsigned long flags;
683 struct request *req = fs->cur_req;
684
685 swim3_dbg("* interrupt, state=%d\n", fs->state);
641 686
687 spin_lock_irqsave(&swim3_lock, flags);
642 intr = in_8(&sw->intr); 688 intr = in_8(&sw->intr);
643 err = (intr & ERROR_INTR)? in_8(&sw->error): 0; 689 err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
644 if ((intr & ERROR_INTR) && fs->state != do_transfer) 690 if ((intr & ERROR_INTR) && fs->state != do_transfer)
645 printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n", 691 swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
646 fs->state, rq_data_dir(fd_req), intr, err); 692 fs->state, rq_data_dir(req), intr, err);
647 switch (fs->state) { 693 switch (fs->state) {
648 case locating: 694 case locating:
649 if (intr & SEEN_SECTOR) { 695 if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
653 del_timer(&fs->timeout); 699 del_timer(&fs->timeout);
654 fs->timeout_pending = 0; 700 fs->timeout_pending = 0;
655 if (sw->ctrack == 0xff) { 701 if (sw->ctrack == 0xff) {
656 printk(KERN_ERR "swim3: seen sector but cyl=ff?\n"); 702 swim3_err("%s", "Seen sector but cyl=ff?\n");
657 fs->cur_cyl = -1; 703 fs->cur_cyl = -1;
658 if (fs->retries > 5) { 704 if (fs->retries > 5) {
659 swim3_end_request_cur(-EIO); 705 swim3_end_request(fs, -EIO, 0);
660 fs->state = idle; 706 fs->state = idle;
661 start_request(fs); 707 start_request(fs);
662 } else { 708 } else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
668 fs->cur_cyl = sw->ctrack; 714 fs->cur_cyl = sw->ctrack;
669 fs->cur_sector = sw->csect; 715 fs->cur_sector = sw->csect;
670 if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl) 716 if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
671 printk(KERN_ERR "swim3: expected cyl %d, got %d\n", 717 swim3_err("Expected cyl %d, got %d\n",
672 fs->expect_cyl, fs->cur_cyl); 718 fs->expect_cyl, fs->cur_cyl);
673 fs->state = do_transfer; 719 fs->state = do_transfer;
674 act(fs); 720 act(fs);
675 } 721 }
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
704 fs->timeout_pending = 0; 750 fs->timeout_pending = 0;
705 dr = fs->dma; 751 dr = fs->dma;
706 cp = fs->dma_cmd; 752 cp = fs->dma_cmd;
707 if (rq_data_dir(fd_req) == WRITE) 753 if (rq_data_dir(req) == WRITE)
708 ++cp; 754 ++cp;
709 /* 755 /*
710 * Check that the main data transfer has finished. 756 * Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
729 if (intr & ERROR_INTR) { 775 if (intr & ERROR_INTR) {
730 n = fs->scount - 1 - resid / 512; 776 n = fs->scount - 1 - resid / 512;
731 if (n > 0) { 777 if (n > 0) {
732 blk_update_request(fd_req, 0, n << 9); 778 blk_update_request(req, 0, n << 9);
733 fs->req_sector += n; 779 fs->req_sector += n;
734 } 780 }
735 if (fs->retries < 5) { 781 if (fs->retries < 5) {
736 ++fs->retries; 782 ++fs->retries;
737 act(fs); 783 act(fs);
738 } else { 784 } else {
739 printk("swim3: error %sing block %ld (err=%x)\n", 785 swim3_err("Error %sing block %ld (err=%x)\n",
740 rq_data_dir(fd_req) == WRITE? "writ": "read", 786 rq_data_dir(req) == WRITE? "writ": "read",
741 (long)blk_rq_pos(fd_req), err); 787 (long)blk_rq_pos(req), err);
742 swim3_end_request_cur(-EIO); 788 swim3_end_request(fs, -EIO, 0);
743 fs->state = idle; 789 fs->state = idle;
744 } 790 }
745 } else { 791 } else {
746 if ((stat & ACTIVE) == 0 || resid != 0) { 792 if ((stat & ACTIVE) == 0 || resid != 0) {
747 /* musta been an error */ 793 /* musta been an error */
748 printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid); 794 swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
749 printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n", 795 swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
750 fs->state, rq_data_dir(fd_req), intr, err); 796 fs->state, rq_data_dir(req), intr, err);
751 swim3_end_request_cur(-EIO); 797 swim3_end_request(fs, -EIO, 0);
752 fs->state = idle; 798 fs->state = idle;
753 start_request(fs); 799 start_request(fs);
754 break; 800 break;
755 } 801 }
756 if (swim3_end_request(0, fs->scount << 9)) { 802 fs->retries = 0;
803 if (swim3_end_request(fs, 0, fs->scount << 9)) {
757 fs->req_sector += fs->scount; 804 fs->req_sector += fs->scount;
758 if (fs->req_sector > fs->secpertrack) { 805 if (fs->req_sector > fs->secpertrack) {
759 fs->req_sector -= fs->secpertrack; 806 fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
770 start_request(fs); 817 start_request(fs);
771 break; 818 break;
772 default: 819 default:
773 printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state); 820 swim3_err("Don't know what to do in state %d\n", fs->state);
774 } 821 }
822 spin_unlock_irqrestore(&swim3_lock, flags);
775 return IRQ_HANDLED; 823 return IRQ_HANDLED;
776} 824}
777 825
@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
781} 829}
782*/ 830*/
783 831
832/* Called under the mutex to grab exclusive access to a drive */
784static int grab_drive(struct floppy_state *fs, enum swim_state state, 833static int grab_drive(struct floppy_state *fs, enum swim_state state,
785 int interruptible) 834 int interruptible)
786{ 835{
787 unsigned long flags; 836 unsigned long flags;
788 837
789 spin_lock_irqsave(&fs->lock, flags); 838 swim3_dbg("%s", "-> grab drive\n");
790 if (fs->state != idle) { 839
840 spin_lock_irqsave(&swim3_lock, flags);
841 if (fs->state != idle && fs->state != available) {
791 ++fs->wanted; 842 ++fs->wanted;
792 while (fs->state != available) { 843 while (fs->state != available) {
844 spin_unlock_irqrestore(&swim3_lock, flags);
793 if (interruptible && signal_pending(current)) { 845 if (interruptible && signal_pending(current)) {
794 --fs->wanted; 846 --fs->wanted;
795 spin_unlock_irqrestore(&fs->lock, flags);
796 return -EINTR; 847 return -EINTR;
797 } 848 }
798 interruptible_sleep_on(&fs->wait); 849 interruptible_sleep_on(&fs->wait);
850 spin_lock_irqsave(&swim3_lock, flags);
799 } 851 }
800 --fs->wanted; 852 --fs->wanted;
801 } 853 }
802 fs->state = state; 854 fs->state = state;
803 spin_unlock_irqrestore(&fs->lock, flags); 855 spin_unlock_irqrestore(&swim3_lock, flags);
856
804 return 0; 857 return 0;
805} 858}
806 859
@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
808{ 861{
809 unsigned long flags; 862 unsigned long flags;
810 863
811 spin_lock_irqsave(&fs->lock, flags); 864 swim3_dbg("%s", "-> release drive\n");
865
866 spin_lock_irqsave(&swim3_lock, flags);
812 fs->state = idle; 867 fs->state = idle;
813 start_request(fs); 868 start_request(fs);
814 spin_unlock_irqrestore(&fs->lock, flags); 869 spin_unlock_irqrestore(&swim3_lock, flags);
815} 870}
816 871
817static int fd_eject(struct floppy_state *fs) 872static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
966{ 1021{
967 struct floppy_state *fs = disk->private_data; 1022 struct floppy_state *fs = disk->private_data;
968 struct swim3 __iomem *sw = fs->swim3; 1023 struct swim3 __iomem *sw = fs->swim3;
1024
969 mutex_lock(&swim3_mutex); 1025 mutex_lock(&swim3_mutex);
970 if (fs->ref_count > 0 && --fs->ref_count == 0) { 1026 if (fs->ref_count > 0 && --fs->ref_count == 0) {
971 swim3_action(fs, MOTOR_OFF); 1027 swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
1031 .revalidate_disk= floppy_revalidate, 1087 .revalidate_disk= floppy_revalidate,
1032}; 1088};
1033 1089
1090static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
1091{
1092 struct floppy_state *fs = macio_get_drvdata(mdev);
1093 struct swim3 __iomem *sw = fs->swim3;
1094
1095 if (!fs)
1096 return;
1097 if (mb_state != MB_FD)
1098 return;
1099
1100 /* Clear state */
1101 out_8(&sw->intr_enable, 0);
1102 in_8(&sw->intr);
1103 in_8(&sw->error);
1104}
1105
1034static int swim3_add_device(struct macio_dev *mdev, int index) 1106static int swim3_add_device(struct macio_dev *mdev, int index)
1035{ 1107{
1036 struct device_node *swim = mdev->ofdev.dev.of_node; 1108 struct device_node *swim = mdev->ofdev.dev.of_node;
1037 struct floppy_state *fs = &floppy_states[index]; 1109 struct floppy_state *fs = &floppy_states[index];
1038 int rc = -EBUSY; 1110 int rc = -EBUSY;
1039 1111
1112 /* Do this first for message macros */
1113 memset(fs, 0, sizeof(*fs));
1114 fs->mdev = mdev;
1115 fs->index = index;
1116
1040 /* Check & Request resources */ 1117 /* Check & Request resources */
1041 if (macio_resource_count(mdev) < 2) { 1118 if (macio_resource_count(mdev) < 2) {
1042 printk(KERN_WARNING "ifd%d: no address for %s\n", 1119 swim3_err("%s", "No address in device-tree\n");
1043 index, swim->full_name);
1044 return -ENXIO; 1120 return -ENXIO;
1045 } 1121 }
1046 if (macio_irq_count(mdev) < 2) { 1122 if (macio_irq_count(mdev) < 1) {
1047 printk(KERN_WARNING "fd%d: no intrs for device %s\n", 1123 swim3_err("%s", "No interrupt in device-tree\n");
1048 index, swim->full_name); 1124 return -ENXIO;
1049 } 1125 }
1050 if (macio_request_resource(mdev, 0, "swim3 (mmio)")) { 1126 if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
1051 printk(KERN_ERR "fd%d: can't request mmio resource for %s\n", 1127 swim3_err("%s", "Can't request mmio resource\n");
1052 index, swim->full_name);
1053 return -EBUSY; 1128 return -EBUSY;
1054 } 1129 }
1055 if (macio_request_resource(mdev, 1, "swim3 (dma)")) { 1130 if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
1056 printk(KERN_ERR "fd%d: can't request dma resource for %s\n", 1131 swim3_err("%s", "Can't request dma resource\n");
1057 index, swim->full_name);
1058 macio_release_resource(mdev, 0); 1132 macio_release_resource(mdev, 0);
1059 return -EBUSY; 1133 return -EBUSY;
1060 } 1134 }
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
1063 if (mdev->media_bay == NULL) 1137 if (mdev->media_bay == NULL)
1064 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1); 1138 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
1065 1139
1066 memset(fs, 0, sizeof(*fs));
1067 spin_lock_init(&fs->lock);
1068 fs->state = idle; 1140 fs->state = idle;
1069 fs->swim3 = (struct swim3 __iomem *) 1141 fs->swim3 = (struct swim3 __iomem *)
1070 ioremap(macio_resource_start(mdev, 0), 0x200); 1142 ioremap(macio_resource_start(mdev, 0), 0x200);
1071 if (fs->swim3 == NULL) { 1143 if (fs->swim3 == NULL) {
1072 printk("fd%d: couldn't map registers for %s\n", 1144 swim3_err("%s", "Couldn't map mmio registers\n");
1073 index, swim->full_name);
1074 rc = -ENOMEM; 1145 rc = -ENOMEM;
1075 goto out_release; 1146 goto out_release;
1076 } 1147 }
1077 fs->dma = (struct dbdma_regs __iomem *) 1148 fs->dma = (struct dbdma_regs __iomem *)
1078 ioremap(macio_resource_start(mdev, 1), 0x200); 1149 ioremap(macio_resource_start(mdev, 1), 0x200);
1079 if (fs->dma == NULL) { 1150 if (fs->dma == NULL) {
1080 printk("fd%d: couldn't map DMA for %s\n", 1151 swim3_err("%s", "Couldn't map dma registers\n");
1081 index, swim->full_name);
1082 iounmap(fs->swim3); 1152 iounmap(fs->swim3);
1083 rc = -ENOMEM; 1153 rc = -ENOMEM;
1084 goto out_release; 1154 goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
1090 fs->secpercyl = 36; 1160 fs->secpercyl = 36;
1091 fs->secpertrack = 18; 1161 fs->secpertrack = 18;
1092 fs->total_secs = 2880; 1162 fs->total_secs = 2880;
1093 fs->mdev = mdev;
1094 init_waitqueue_head(&fs->wait); 1163 init_waitqueue_head(&fs->wait);
1095 1164
1096 fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space); 1165 fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
1097 memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd)); 1166 memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
1098 st_le16(&fs->dma_cmd[1].command, DBDMA_STOP); 1167 st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
1099 1168
1169 if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
1170 swim3_mb_event(mdev, MB_FD);
1171
1100 if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) { 1172 if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
1101 printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n", 1173 swim3_err("%s", "Couldn't request interrupt\n");
1102 index, fs->swim3_intr, swim->full_name);
1103 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0); 1174 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
1104 goto out_unmap; 1175 goto out_unmap;
1105 return -EBUSY; 1176 return -EBUSY;
1106 } 1177 }
1107/*
1108 if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
1109 printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
1110 fs->dma_intr);
1111 return -EBUSY;
1112 }
1113*/
1114 1178
1115 init_timer(&fs->timeout); 1179 init_timer(&fs->timeout);
1116 1180
1117 printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count, 1181 swim3_info("SWIM3 floppy controller %s\n",
1118 mdev->media_bay ? "in media bay" : ""); 1182 mdev->media_bay ? "in media bay" : "");
1119 1183
1120 return 0; 1184 return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
1132 1196
1133static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match) 1197static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
1134{ 1198{
1135 int i, rc;
1136 struct gendisk *disk; 1199 struct gendisk *disk;
1200 int index, rc;
1201
1202 index = floppy_count++;
1203 if (index >= MAX_FLOPPIES)
1204 return -ENXIO;
1137 1205
1138 /* Add the drive */ 1206 /* Add the drive */
1139 rc = swim3_add_device(mdev, floppy_count); 1207 rc = swim3_add_device(mdev, index);
1140 if (rc) 1208 if (rc)
1141 return rc; 1209 return rc;
1210 /* Now register that disk. Same comment about failure handling */
1211 disk = disks[index] = alloc_disk(1);
1212 if (disk == NULL)
1213 return -ENOMEM;
1214 disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
1215 if (disk->queue == NULL) {
1216 put_disk(disk);
1217 return -ENOMEM;
1218 }
1219 disk->queue->queuedata = &floppy_states[index];
1142 1220
1143 /* Now create the queue if not there yet */ 1221 if (index == 0) {
1144 if (swim3_queue == NULL) {
1145 /* If we failed, there isn't much we can do as the driver is still 1222 /* If we failed, there isn't much we can do as the driver is still
1146 * too dumb to remove the device, just bail out 1223 * too dumb to remove the device, just bail out
1147 */ 1224 */
1148 if (register_blkdev(FLOPPY_MAJOR, "fd")) 1225 if (register_blkdev(FLOPPY_MAJOR, "fd"))
1149 return 0; 1226 return 0;
1150 swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
1151 if (swim3_queue == NULL) {
1152 unregister_blkdev(FLOPPY_MAJOR, "fd");
1153 return 0;
1154 }
1155 } 1227 }
1156 1228
1157 /* Now register that disk. Same comment about failure handling */
1158 i = floppy_count++;
1159 disk = disks[i] = alloc_disk(1);
1160 if (disk == NULL)
1161 return 0;
1162
1163 disk->major = FLOPPY_MAJOR; 1229 disk->major = FLOPPY_MAJOR;
1164 disk->first_minor = i; 1230 disk->first_minor = index;
1165 disk->fops = &floppy_fops; 1231 disk->fops = &floppy_fops;
1166 disk->private_data = &floppy_states[i]; 1232 disk->private_data = &floppy_states[index];
1167 disk->queue = swim3_queue;
1168 disk->flags |= GENHD_FL_REMOVABLE; 1233 disk->flags |= GENHD_FL_REMOVABLE;
1169 sprintf(disk->disk_name, "fd%d", i); 1234 sprintf(disk->disk_name, "fd%d", index);
1170 set_capacity(disk, 2880); 1235 set_capacity(disk, 2880);
1171 add_disk(disk); 1236 add_disk(disk);
1172 1237
@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
1194 .of_match_table = swim3_match, 1259 .of_match_table = swim3_match,
1195 }, 1260 },
1196 .probe = swim3_attach, 1261 .probe = swim3_attach,
1262#ifdef CONFIG_PMAC_MEDIABAY
1263 .mediabay_event = swim3_mb_event,
1264#endif
1197#if 0 1265#if 0
1198 .suspend = swim3_suspend, 1266 .suspend = swim3_suspend,
1199 .resume = swim3_resume, 1267 .resume = swim3_resume,
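
The swim3 rework above retires the single global swim3_queue and fd_req pointer: each drive now gets its own request queue hung off its gendisk, with the owning floppy_state reachable through queue->queuedata, and do_fd_request() simply forwards to start_request(). The following fragment, not compilable on its own, only condenses the wiring already visible in the hunks above into one place; it uses no calls beyond those shown in the patch:

    /* Sketch: per-drive queue setup as done in swim3_attach() above. */
    static void example_request_fn(struct request_queue *q)
    {
        struct floppy_state *fs = q->queuedata;   /* back-pointer set at init time */

        start_request(fs);                        /* drive this floppy's state machine */
    }

    /* ...in the probe path... */
    disk = alloc_disk(1);
    disk->queue = blk_init_queue(example_request_fn, &swim3_lock);
    disk->queue->queuedata = &floppy_states[index];

With the request tracked per device in fs->cur_req, the interrupt and timeout paths no longer race over one shared fd_req.
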
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11b41fd40c27..5ccf142ef0b8 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -188,7 +188,7 @@ config BT_MRVL
188 The core driver to support Marvell Bluetooth devices. 188 The core driver to support Marvell Bluetooth devices.
189 189
190 This driver is required if you want to support 190 This driver is required if you want to support
191 Marvell Bluetooth devices, such as 8688/8787. 191 Marvell Bluetooth devices, such as 8688/8787/8797.
192 192
193 Say Y here to compile Marvell Bluetooth driver 193 Say Y here to compile Marvell Bluetooth driver
194 into the kernel or say M to compile it as module. 194 into the kernel or say M to compile it as module.
@@ -201,8 +201,8 @@ config BT_MRVL_SDIO
201 The driver for Marvell Bluetooth chipsets with SDIO interface. 201 The driver for Marvell Bluetooth chipsets with SDIO interface.
202 202
203 This driver is required if you want to use Marvell Bluetooth 203 This driver is required if you want to use Marvell Bluetooth
204 devices with SDIO interface. Currently SD8688/SD8787 chipsets are 204 devices with SDIO interface. Currently SD8688/SD8787/SD8797
205 supported. 205 chipsets are supported.
206 206
207 Say Y here to compile support for Marvell BT-over-SDIO driver 207 Say Y here to compile support for Marvell BT-over-SDIO driver
208 into the kernel or say M to compile it as module. 208 into the kernel or say M to compile it as module.
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 9ef48167e2cf..27b74b0d547b 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -65,7 +65,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
65 .io_port_1 = 0x01, 65 .io_port_1 = 0x01,
66 .io_port_2 = 0x02, 66 .io_port_2 = 0x02,
67}; 67};
68static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = { 68static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
69 .cfg = 0x00, 69 .cfg = 0x00,
70 .host_int_mask = 0x02, 70 .host_int_mask = 0x02,
71 .host_intstatus = 0x03, 71 .host_intstatus = 0x03,
@@ -92,7 +92,14 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
92static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = { 92static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
93 .helper = NULL, 93 .helper = NULL,
94 .firmware = "mrvl/sd8787_uapsta.bin", 94 .firmware = "mrvl/sd8787_uapsta.bin",
95 .reg = &btmrvl_reg_8787, 95 .reg = &btmrvl_reg_87xx,
96 .sd_blksz_fw_dl = 256,
97};
98
99static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
100 .helper = NULL,
101 .firmware = "mrvl/sd8797_uapsta.bin",
102 .reg = &btmrvl_reg_87xx,
96 .sd_blksz_fw_dl = 256, 103 .sd_blksz_fw_dl = 256,
97}; 104};
98 105
@@ -103,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
103 /* Marvell SD8787 Bluetooth device */ 110 /* Marvell SD8787 Bluetooth device */
104 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A), 111 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
105 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, 112 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
113 /* Marvell SD8797 Bluetooth device */
114 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
115 .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
106 116
107 { } /* Terminating entry */ 117 { } /* Terminating entry */
108}; 118};
@@ -1076,3 +1086,4 @@ MODULE_LICENSE("GPL v2");
1076MODULE_FIRMWARE("sd8688_helper.bin"); 1086MODULE_FIRMWARE("sd8688_helper.bin");
1077MODULE_FIRMWARE("sd8688.bin"); 1087MODULE_FIRMWARE("sd8688.bin");
1078MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); 1088MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
1089MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f9b726091ad0..eabc437ce500 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -100,6 +100,9 @@ static struct usb_device_id btusb_table[] = {
100 /* Canyon CN-BTU1 with HID interfaces */ 100 /* Canyon CN-BTU1 with HID interfaces */
101 { USB_DEVICE(0x0c10, 0x0000) }, 101 { USB_DEVICE(0x0c10, 0x0000) },
102 102
103 /* Broadcom BCM20702A0 */
104 { USB_DEVICE(0x413c, 0x8197) },
105
103 { } /* Terminating entry */ 106 { } /* Terminating entry */
104}; 107};
105 108
@@ -774,9 +777,8 @@ skip_waking:
774 usb_mark_last_busy(data->udev); 777 usb_mark_last_busy(data->udev);
775 } 778 }
776 779
777 usb_free_urb(urb);
778
779done: 780done:
781 usb_free_urb(urb);
780 return err; 782 return err;
781} 783}
782 784
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 66cd0b8096ca..c92424ca1a55 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1186,10 +1186,11 @@ static void gen6_cleanup(void)
1186/* Certain Gen5 chipsets require require idling the GPU before 1186/* Certain Gen5 chipsets require require idling the GPU before
1187 * unmapping anything from the GTT when VT-d is enabled. 1187 * unmapping anything from the GTT when VT-d is enabled.
1188 */ 1188 */
1189extern int intel_iommu_gfx_mapped;
1190static inline int needs_idle_maps(void) 1189static inline int needs_idle_maps(void)
1191{ 1190{
1191#ifdef CONFIG_INTEL_IOMMU
1192 const unsigned short gpu_devid = intel_private.pcidev->device; 1192 const unsigned short gpu_devid = intel_private.pcidev->device;
1193 extern int intel_iommu_gfx_mapped;
1193 1194
1194 /* Query intel_iommu to see if we need the workaround. Presumably that 1195 /* Query intel_iommu to see if we need the workaround. Presumably that
1195 * was loaded first. 1196 * was loaded first.
@@ -1198,7 +1199,7 @@ static inline int needs_idle_maps(void)
1198 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && 1199 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
1199 intel_iommu_gfx_mapped) 1200 intel_iommu_gfx_mapped)
1200 return 1; 1201 return 1;
1201 1202#endif
1202 return 0; 1203 return 0;
1203} 1204}
1204 1205
@@ -1236,7 +1237,7 @@ static int i9xx_setup(void)
1236 intel_private.gtt_bus_addr = reg_addr + gtt_offset; 1237 intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1237 } 1238 }
1238 1239
1239 if (needs_idle_maps()); 1240 if (needs_idle_maps())
1240 intel_private.base.do_idle_maps = 1; 1241 intel_private.base.do_idle_maps = 1;
1241 1242
1242 intel_i9xx_setup_flush(); 1243 intel_i9xx_setup_flush();
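
Two fixes are folded into the intel-gtt hunk: the reference to intel_iommu_gfx_mapped is now compiled only under CONFIG_INTEL_IOMMU, and the stray semicolon after if (needs_idle_maps()) is dropped, since it had turned the assignment below it into unconditional code. A tiny standalone illustration of that second bug (names invented):

    #include <stdio.h>

    static int needs_workaround(void) { return 0; }

    int main(void)
    {
        int buggy = 0, fixed = 0;

        if (needs_workaround());   /* BUG: the ';' is the entire body of the if...   */
            buggy = 1;             /* ...so this assignment always runs              */

        if (needs_workaround())    /* fixed form: assignment depends on the check    */
            fixed = 1;

        printf("buggy=%d fixed=%d\n", buggy, fixed);   /* prints buggy=1 fixed=0 */
        return 0;
    }
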
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 63e19ba56bbe..6035ab8d5ef7 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -941,7 +941,7 @@ void get_random_bytes(void *buf, int nbytes)
941 if (!arch_get_random_long(&v)) 941 if (!arch_get_random_long(&v))
942 break; 942 break;
943 943
944 memcpy(buf, &v, chunk); 944 memcpy(p, &v, chunk);
945 p += chunk; 945 p += chunk;
946 nbytes -= chunk; 946 nbytes -= chunk;
947 } 947 }
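
The one-line random.c fix copies each chunk through the advancing cursor p instead of the buffer start, so successive chunks no longer overwrite the first bytes. The same fill-in-chunks pattern in plain C (invented names):

    #include <stdio.h>
    #include <string.h>

    /* Fill dst with repeated copies of a small source word, chunk by chunk. */
    static void fill_words(unsigned char *dst, size_t nbytes)
    {
        unsigned char *p = dst;
        unsigned long v = 0xA5A5A5A5UL;

        while (nbytes) {
            size_t chunk = nbytes < sizeof(v) ? nbytes : sizeof(v);

            memcpy(p, &v, chunk);   /* copy to the cursor, not to dst, or every   */
            p += chunk;             /* iteration would clobber the leading bytes  */
            nbytes -= chunk;
        }
    }

    int main(void)
    {
        unsigned char buf[10];

        fill_words(buf, sizeof(buf));
        printf("%02x %02x\n", buf[0], buf[9]);
        return 0;
    }
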
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
index edaa987621ea..f5002015d82e 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/db8500-cpufreq.c
@@ -109,7 +109,7 @@ static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
109 109
110static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy) 110static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
111{ 111{
112 int res; 112 int i, res;
113 113
114 BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table)); 114 BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table));
115 115
@@ -120,8 +120,8 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
120 freq_table[3].frequency = 1000000; 120 freq_table[3].frequency = 1000000;
121 } 121 }
122 pr_info("db8500-cpufreq : Available frequencies:\n"); 122 pr_info("db8500-cpufreq : Available frequencies:\n");
123 while (freq_table[i].frequency != CPUFREQ_TABLE_END) 123 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
124 pr_info(" %d Mhz\n", freq_table[i++].frequency/1000); 124 pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
125 125
126 /* get policy fields based on the table */ 126 /* get policy fields based on the table */
127 res = cpufreq_frequency_table_cpuinfo(policy, freq_table); 127 res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
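
The db8500-cpufreq fix declares the index and walks the CPUFREQ_TABLE_END-terminated table with a for loop, so i is initialized before the first dereference. A self-contained version of that sentinel-terminated walk, with invented table values standing in for the driver's frequencies:

    #include <stdio.h>

    #define TABLE_END 0u   /* stand-in sentinel; the real code uses CPUFREQ_TABLE_END */

    static const unsigned int freq_khz[] = { 200000, 400000, 800000, 1000000, TABLE_END };

    int main(void)
    {
        int i;

        for (i = 0; freq_khz[i] != TABLE_END; i++)
            printf(" %u MHz\n", freq_khz[i] / 1000);
        return 0;
    }
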
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 5c6f56f21443..dcd8babae9eb 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -343,11 +343,13 @@ static void mv_process_hash_current(int first_block)
343 else 343 else
344 op.config |= CFG_MID_FRAG; 344 op.config |= CFG_MID_FRAG;
345 345
346 writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); 346 if (first_block) {
347 writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); 347 writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
348 writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); 348 writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
349 writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); 349 writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
350 writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); 350 writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
351 writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
352 }
351 } 353 }
352 354
353 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); 355 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
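
The mv_cesa change writes the initial digest registers only while processing the first fragment of a hash; middle and final fragments must continue from the engine's running state, so reloading the initial values there would corrupt the result. The same first-fragment-only initialization, reduced to a toy incremental checksum in plain C (all names invented), looks like this:

    #include <stdio.h>
    #include <string.h>

    struct csum_ctx { unsigned int state; int started; };

    /* Feed one fragment; the state is seeded only for the first fragment. */
    static void csum_update(struct csum_ctx *ctx, const unsigned char *buf, size_t len)
    {
        size_t i;

        if (!ctx->started) {        /* analogous to the first_block check above */
            ctx->state = 0x12345678u;
            ctx->started = 1;
        }
        for (i = 0; i < len; i++)
            ctx->state = ctx->state * 31u + buf[i];
    }

    int main(void)
    {
        struct csum_ctx a = { 0 }, b = { 0 };
        const char *msg = "split across fragments";

        csum_update(&a, (const unsigned char *)msg, strlen(msg));         /* one shot   */
        csum_update(&b, (const unsigned char *)msg, 5);                    /* fragment 1 */
        csum_update(&b, (const unsigned char *)msg + 5, strlen(msg) - 5);  /* fragment 2 */
        printf("%s\n", a.state == b.state ? "match" : "mismatch");         /* prints match */
        return 0;
    }
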
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 643b055ed3cd..8f0491037080 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -1,36 +1,29 @@
1config ARCH_HAS_DEVFREQ
2 bool
3 depends on ARCH_HAS_OPP
4 help
5 Denotes that the architecture supports DEVFREQ. If the architecture
6 supports multiple OPP entries per device and the frequency of the
7 devices with OPPs may be altered dynamically, the architecture
8 supports DEVFREQ.
9
10menuconfig PM_DEVFREQ 1menuconfig PM_DEVFREQ
11 bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support" 2 bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
12 depends on PM_OPP && ARCH_HAS_DEVFREQ
13 help 3 help
14 With OPP support, a device may have a list of frequencies and 4 A device may have a list of frequencies and voltages available.
15 voltages available. DEVFREQ, a generic DVFS framework can be 5 devfreq, a generic DVFS framework can be registered for a device
16 registered for a device with OPP support in order to let the 6 in order to let the governor provided to devfreq choose an
17 governor provided to DEVFREQ choose an operating frequency 7 operating frequency based on the device driver's policy.
18 based on the OPP's list and the policy given with DEVFREQ.
19 8
20 Each device may have its own governor and policy. DEVFREQ can 9 Each device may have its own governor and policy. Devfreq can
21 reevaluate the device state periodically and/or based on the 10 reevaluate the device state periodically and/or based on the
22 OPP list changes (each frequency/voltage pair in OPP may be 11 notification to "nb", a notifier block, of devfreq.
23 disabled or enabled).
24 12
25 Like some CPUs with CPUFREQ, a device may have multiple clocks. 13 Like some CPUs with CPUfreq, a device may have multiple clocks.
26 However, because the clock frequencies of a single device are 14 However, because the clock frequencies of a single device are
27 determined by the single device's state, an instance of DEVFREQ 15 determined by the single device's state, an instance of devfreq
28 is attached to a single device and returns a "representative" 16 is attached to a single device and returns a "representative"
29 clock frequency from the OPP of the device, which is also attached 17 clock frequency of the device, which is also attached
30 to a device by 1-to-1. The device registering DEVFREQ takes the 18 to a device by 1-to-1. The device registering devfreq takes the
31 responsiblity to "interpret" the frequency listed in OPP and 19 responsiblity to "interpret" the representative frequency and
32 to set its every clock accordingly with the "target" callback 20 to set its every clock accordingly with the "target" callback
33 given to DEVFREQ. 21 given to devfreq.
22
23 When OPP is used with the devfreq device, it is recommended to
24 register devfreq's nb to the OPP's notifier head. If OPP is
25 used with the devfreq device, you may use OPP helper
26 functions defined in devfreq.h.
34 27
35if PM_DEVFREQ 28if PM_DEVFREQ
36 29
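
The rewritten help text describes the devfreq model: a per-device governor picks a representative frequency, and the driver's target callback interprets it and programs the device's clocks, optionally re-evaluated every polling interval or on an OPP notification. As a hedged sketch only, the rough shape of registering such a device in this kernel generation is below; the exact structure fields, the devfreq_add_device() arguments, and the governor symbol are assumptions here, and include/linux/devfreq.h is the authoritative reference:

    /* Sketch, not taken from the patch: field names and signatures are assumed. */
    static int foo_target(struct device *dev, unsigned long *freq)
    {
        /* Driver policy: clamp *freq to a supported rate and program the clocks. */
        return 0;
    }

    static struct devfreq_dev_profile foo_profile = {
        .initial_freq = 200000000,      /* Hz, invented value */
        .polling_ms   = 100,            /* let the governor re-evaluate periodically */
        .target       = foo_target,
    };

    /* ...in probe():
     *      devfreq_add_device(dev, &foo_profile, &devfreq_simple_ondemand, NULL);
     */
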
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 5d15b812377b..59d24e9cb8c5 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -15,7 +15,9 @@
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/module.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/stat.h>
19#include <linux/opp.h> 21#include <linux/opp.h>
20#include <linux/devfreq.h> 22#include <linux/devfreq.h>
21#include <linux/workqueue.h> 23#include <linux/workqueue.h>
@@ -416,10 +418,14 @@ out:
416 */ 418 */
417int devfreq_remove_device(struct devfreq *devfreq) 419int devfreq_remove_device(struct devfreq *devfreq)
418{ 420{
421 bool central_polling;
422
419 if (!devfreq) 423 if (!devfreq)
420 return -EINVAL; 424 return -EINVAL;
421 425
422 if (!devfreq->governor->no_central_polling) { 426 central_polling = !devfreq->governor->no_central_polling;
427
428 if (central_polling) {
423 mutex_lock(&devfreq_list_lock); 429 mutex_lock(&devfreq_list_lock);
424 while (wait_remove_device == devfreq) { 430 while (wait_remove_device == devfreq) {
425 mutex_unlock(&devfreq_list_lock); 431 mutex_unlock(&devfreq_list_lock);
@@ -431,7 +437,7 @@ int devfreq_remove_device(struct devfreq *devfreq)
431 mutex_lock(&devfreq->lock); 437 mutex_lock(&devfreq->lock);
432 _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */ 438 _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
433 439
434 if (!devfreq->governor->no_central_polling) 440 if (central_polling)
435 mutex_unlock(&devfreq_list_lock); 441 mutex_unlock(&devfreq_list_lock);
436 442
437 return 0; 443 return 0;
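
In devfreq_remove_device() above, the governor's no_central_polling flag is read into a local before _remove_devfreq() runs, because that call can free the devfreq object while the flag is still needed to decide whether to unlock afterwards. The shape of that fix in plain C (invented names):

    #include <stdlib.h>

    struct widget { int needs_unlock; /* ... */ };

    static void lock_list(void)   { /* ... */ }
    static void unlock_list(void) { /* ... */ }
    static void destroy_widget(struct widget *w) { free(w); }

    static void remove_widget(struct widget *w)
    {
        int needs_unlock = w->needs_unlock;   /* cache before the object may be freed */

        if (needs_unlock)
            lock_list();

        destroy_widget(w);                    /* w must not be dereferenced after this */

        if (needs_unlock)                     /* use the cached copy, not w->...       */
            unlock_list();
    }
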
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 8af8e864a9cf..73464a62adf7 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1128,7 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
1128 { .compatible = "fsl,p1020-memory-controller", }, 1128 { .compatible = "fsl,p1020-memory-controller", },
1129 { .compatible = "fsl,p1021-memory-controller", }, 1129 { .compatible = "fsl,p1021-memory-controller", },
1130 { .compatible = "fsl,p2020-memory-controller", }, 1130 { .compatible = "fsl,p2020-memory-controller", },
1131 { .compatible = "fsl,p4080-memory-controller", }, 1131 { .compatible = "fsl,qoriq-memory-controller", },
1132 {}, 1132 {},
1133}; 1133};
1134MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match); 1134MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index bcb1126e3d00..153980be4ee6 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -585,14 +585,12 @@ int dmi_name_in_serial(const char *str)
585} 585}
586 586
587/** 587/**
588 * dmi_name_in_vendors - Check if string is anywhere in the DMI vendor information. 588 * dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
589 * @str: Case sensitive Name 589 * @str: Case sensitive Name
590 */ 590 */
591int dmi_name_in_vendors(const char *str) 591int dmi_name_in_vendors(const char *str)
592{ 592{
593 static int fields[] = { DMI_BIOS_VENDOR, DMI_BIOS_VERSION, DMI_SYS_VENDOR, 593 static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE };
594 DMI_PRODUCT_NAME, DMI_PRODUCT_VERSION, DMI_BOARD_VENDOR,
595 DMI_BOARD_NAME, DMI_BOARD_VERSION, DMI_NONE };
596 int i; 594 int i;
597 for (i = 0; fields[i] != DMI_NONE; i++) { 595 for (i = 0; fields[i] != DMI_NONE; i++) {
598 int f = fields[i]; 596 int f = fields[i];
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 8370f72d87ff..b0a81173a268 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -457,7 +457,8 @@ static int efi_pstore_close(struct pstore_info *psi)
457} 457}
458 458
459static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, 459static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
460 struct timespec *timespec, struct pstore_info *psi) 460 struct timespec *timespec,
461 char **buf, struct pstore_info *psi)
461{ 462{
462 efi_guid_t vendor = LINUX_EFI_CRASH_GUID; 463 efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
463 struct efivars *efivars = psi->data; 464 struct efivars *efivars = psi->data;
@@ -478,7 +479,11 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
478 timespec->tv_nsec = 0; 479 timespec->tv_nsec = 0;
479 get_var_data_locked(efivars, &efivars->walk_entry->var); 480 get_var_data_locked(efivars, &efivars->walk_entry->var);
480 size = efivars->walk_entry->var.DataSize; 481 size = efivars->walk_entry->var.DataSize;
481 memcpy(psi->buf, efivars->walk_entry->var.Data, size); 482 *buf = kmalloc(size, GFP_KERNEL);
483 if (*buf == NULL)
484 return -ENOMEM;
485 memcpy(*buf, efivars->walk_entry->var.Data,
486 size);
482 efivars->walk_entry = list_entry(efivars->walk_entry->list.next, 487 efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
483 struct efivar_entry, list); 488 struct efivar_entry, list);
484 return size; 489 return size;
@@ -576,7 +581,8 @@ static int efi_pstore_close(struct pstore_info *psi)
576} 581}
577 582
578static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, 583static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
579 struct timespec *time, struct pstore_info *psi) 584 struct timespec *timespec,
585 char **buf, struct pstore_info *psi)
580{ 586{
581 return -1; 587 return -1;
582} 588}
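
The efivars hunk switches the pstore read callback to a callee-allocated buffer: the record size is only known once the entry has been inspected, so the callback allocates *buf itself and returns either the size or -ENOMEM. A plain-C sketch of that return-a-buffer-through-a-double-pointer convention (invented names):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/types.h>

    /* Copy the record into a freshly allocated buffer that the caller frees.
     * Returns the record size, or a negative errno-style value on failure.
     */
    static ssize_t read_record(const char *record, char **buf)
    {
        size_t size = strlen(record);

        *buf = malloc(size);
        if (*buf == NULL)
            return -ENOMEM;
        memcpy(*buf, record, size);
        return (ssize_t)size;
    }
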
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c811cb107904..2cce44a1d7d0 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
 	ibft_cleanup();
 }
 
+#ifdef CONFIG_ACPI
+static const struct {
+	char *sign;
+} ibft_signs[] = {
+	/*
+	 * One spec says "IBFT", the other says "iBFT". We have to check
+	 * for both.
+	 */
+	{ ACPI_SIG_IBFT },
+	{ "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+	int i;
+	struct acpi_table_header *table = NULL;
+
+	if (acpi_disabled)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+		acpi_get_table(ibft_signs[i].sign, 0, &table);
+		ibft_addr = (struct acpi_table_ibft *)table;
+	}
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
 /*
  * ibft_init() - creates sysfs tree entries for the iBFT data.
  */
@@ -753,9 +784,16 @@ static int __init ibft_init(void)
 {
 	int rc = 0;
 
+	/*
+	   As on UEFI systems the setup_arch()/find_ibft_region()
+	   is called before ACPI tables are parsed and it only does
+	   legacy finding.
+	*/
+	if (!ibft_addr)
+		acpi_find_ibft_region();
+
 	if (ibft_addr) {
-		printk(KERN_INFO "iBFT detected at 0x%llx.\n",
-		       (u64)isa_virt_to_bus(ibft_addr));
+		pr_info("iBFT detected.\n");
 
 		rc = ibft_check_device();
 		if (rc)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index bfe723266fd8..4da4eb9ae926 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
 static const struct {
 	char *sign;
 } ibft_signs[] = {
-#ifdef CONFIG_ACPI
-	/*
-	 * One spec says "IBFT", the other says "iBFT". We have to check
-	 * for both.
-	 */
-	{ ACPI_SIG_IBFT },
-#endif
 	{ "iBFT" },
 	{ "BIFT" },		/* Broadcom iSCSI Offload */
 };
@@ -62,14 +55,6 @@ static const struct {
 #define VGA_MEM 0xA0000 /* VGA buffer */
 #define VGA_SIZE 0x20000 /* 128kB */
 
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
-	ibft_addr = (struct acpi_table_ibft *)header;
-	return 0;
-}
-#endif /* CONFIG_ACPI */
-
 static int __init find_ibft_in_mem(void)
 {
 	unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
 			 * the table cannot be valid. */
 			if (pos + len <= (IBFT_END-1)) {
 				ibft_addr = (struct acpi_table_ibft *)virt;
+				pr_info("iBFT found at 0x%lx.\n", pos);
 				goto done;
 			}
 		}
@@ -108,20 +94,12 @@ done:
  */
 unsigned long __init find_ibft_region(unsigned long *sizep)
 {
-#ifdef CONFIG_ACPI
-	int i;
-#endif
 	ibft_addr = NULL;
 
-#ifdef CONFIG_ACPI
-	for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
-		acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
 	/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
 	 * only use ACPI for this */
 
-	if (!ibft_addr && !efi_enabled)
+	if (!efi_enabled)
 		find_ibft_in_mem();
 
 	if (ibft_addr) {
diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
index f10fc521951b..1eedb6f7fdab 100644
--- a/drivers/firmware/sigma.c
+++ b/drivers/firmware/sigma.c
@@ -14,13 +14,34 @@
 #include <linux/module.h>
 #include <linux/sigma.h>
 
-/* Return: 0==OK, <0==error, =1 ==no more actions */
+static size_t sigma_action_size(struct sigma_action *sa)
+{
+	size_t payload = 0;
+
+	switch (sa->instr) {
+	case SIGMA_ACTION_WRITEXBYTES:
+	case SIGMA_ACTION_WRITESINGLE:
+	case SIGMA_ACTION_WRITESAFELOAD:
+		payload = sigma_action_len(sa);
+		break;
+	default:
+		break;
+	}
+
+	payload = ALIGN(payload, 2);
+
+	return payload + sizeof(struct sigma_action);
+}
+
+/*
+ * Returns a negative error value in case of an error, 0 if processing of
+ * the firmware should be stopped after this action, 1 otherwise.
+ */
 static int
-process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
+process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
 {
-	struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
 	size_t len = sigma_action_len(sa);
-	int ret = 0;
+	int ret;
 
 	pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
 		sa->instr, sa->addr, len);
@@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
 	case SIGMA_ACTION_WRITEXBYTES:
 	case SIGMA_ACTION_WRITESINGLE:
 	case SIGMA_ACTION_WRITESAFELOAD:
-		if (ssfw->fw->size < ssfw->pos + len)
-			return -EINVAL;
 		ret = i2c_master_send(client, (void *)&sa->addr, len);
 		if (ret < 0)
 			return -EINVAL;
 		break;
-
 	case SIGMA_ACTION_DELAY:
-		ret = 0;
 		udelay(len);
 		len = 0;
 		break;
-
 	case SIGMA_ACTION_END:
-		return 1;
-
+		return 0;
 	default:
 		return -EINVAL;
 	}
 
-	/* when arrive here ret=0 or sent data */
-	ssfw->pos += sigma_action_size(sa, len);
-	return ssfw->pos == ssfw->fw->size;
+	return 1;
 }
 
 static int
 process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
 {
-	pr_debug("%s: processing %p\n", __func__, ssfw);
+	struct sigma_action *sa;
+	size_t size;
+	int ret;
+
+	while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
+		sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
+
+		size = sigma_action_size(sa);
+		ssfw->pos += size;
+		if (ssfw->pos > ssfw->fw->size || size == 0)
+			break;
+
+		ret = process_sigma_action(client, sa);
 
-	while (1) {
-		int ret = process_sigma_action(client, ssfw);
 		pr_debug("%s: action returned %i\n", __func__, ret);
-		if (ret == 1)
-			return 0;
-		else if (ret)
+
+		if (ret <= 0)
 			return ret;
 	}
+
+	if (ssfw->pos != ssfw->fw->size)
+		return -EINVAL;
+
+	return 0;
 }
 
 int process_sigma_firmware(struct i2c_client *client, const char *name)
@@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name)
 
 	/* then verify the header */
 	ret = -EINVAL;
-	if (fw->size < sizeof(*ssfw_head))
+
+	/*
+	 * Reject too small or unreasonable large files. The upper limit has been
+	 * chosen a bit arbitrarily, but it should be enough for all practical
+	 * purposes and having the limit makes it easier to avoid integer
+	 * overflows later in the loading process.
+	 */
+	if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
 		goto done;
 
 	ssfw_head = (void *)fw->data;
 	if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
 		goto done;
 
-	crc = crc32(0, fw->data, fw->size);
+	crc = crc32(0, fw->data + sizeof(*ssfw_head),
+			fw->size - sizeof(*ssfw_head));
 	pr_debug("%s: crc=%x\n", __func__, crc);
-	if (crc != ssfw_head->crc)
+	if (crc != le32_to_cpu(ssfw_head->crc))
 		goto done;
 
 	ssfw.pos = sizeof(*ssfw_head);
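The rewritten walker only trusts sizes it can verify: it refuses to look at a record unless a full header fits in the remaining firmware, advances by sigma_action_size() (header plus payload rounded up to 2 bytes), and bails out if the advance would run past the blob, so the final position must land exactly on the end of the image. A standalone sketch of the same bounds discipline, with a simplified stand-in for struct sigma_action (field names and widths are illustrative):

#include <stddef.h>
#include <stdint.h>

struct record {				/* simplified stand-in, not the real layout */
	uint8_t  instr;
	uint8_t  len_hi;
	uint16_t len;			/* payload length, like sigma_action_len() */
	uint8_t  payload[];
};

static size_t record_size(const struct record *r)
{
	size_t payload = r->len | ((size_t)r->len_hi << 16);

	payload = (payload + 1) & ~(size_t)1;	/* ALIGN(payload, 2) */
	return sizeof(*r) + payload;
}

static int walk(const uint8_t *data, size_t size)
{
	size_t pos = 0;

	while (pos + sizeof(struct record) <= size) {	/* full header available? */
		const struct record *r = (const void *)(data + pos);

		pos += record_size(r);
		if (pos > size)				/* record runs past the blob */
			return -1;
		/* ... process the record here ... */
	}
	return pos == size ? 0 : -1;	/* must end exactly at the image boundary */
}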
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index dbcb0bcfd8da..4e018d6a7639 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
 obj-$(CONFIG_GPIO_EP93XX)	+= gpio-ep93xx.o
 obj-$(CONFIG_GPIO_IT8761E)	+= gpio-it8761e.o
 obj-$(CONFIG_GPIO_JANZ_TTL)	+= gpio-janz-ttl.o
-obj-$(CONFIG_MACH_KS8695)	+= gpio-ks8695.o
+obj-$(CONFIG_ARCH_KS8695)	+= gpio-ks8695.o
 obj-$(CONFIG_GPIO_LANGWELL)	+= gpio-langwell.o
 obj-$(CONFIG_ARCH_LPC32XX)	+= gpio-lpc32xx.o
 obj-$(CONFIG_GPIO_MAX730X)	+= gpio-max730x.o
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 038f5eb8b13d..f8ce29ef9f88 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -22,7 +22,6 @@
 #include <linux/mfd/da9052/da9052.h>
 #include <linux/mfd/da9052/reg.h>
 #include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>
 
 #define DA9052_INPUT				1
 #define DA9052_OUTPUT_OPENDRAIN		2
@@ -43,6 +42,9 @@
 #define DA9052_GPIO_MASK_UPPER_NIBBLE		0xF0
 #define DA9052_GPIO_MASK_LOWER_NIBBLE		0x0F
 #define DA9052_GPIO_NIBBLE_SHIFT		4
+#define DA9052_IRQ_GPI0				16
+#define DA9052_GPIO_ODD_SHIFT			7
+#define DA9052_GPIO_EVEN_SHIFT			3
 
 struct da9052_gpio {
 	struct da9052 *da9052;
@@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
 static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
 {
 	struct da9052_gpio *gpio = to_da9052_gpio(gc);
-	unsigned char register_value = 0;
 	int ret;
 
 	if (da9052_gpio_port_odd(offset)) {
-		if (value) {
-			register_value = DA9052_GPIO_ODD_PORT_MODE;
 		ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
 					DA9052_GPIO_0_1_REG,
 					DA9052_GPIO_ODD_PORT_MODE,
-					register_value);
+					value << DA9052_GPIO_ODD_SHIFT);
 		if (ret != 0)
 			dev_err(gpio->da9052->dev,
 				"Failed to updated gpio odd reg,%d",
 				ret);
-		}
 	} else {
-		if (value) {
-			register_value = DA9052_GPIO_EVEN_PORT_MODE;
 		ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
 					DA9052_GPIO_0_1_REG,
 					DA9052_GPIO_EVEN_PORT_MODE,
-					register_value);
+					value << DA9052_GPIO_EVEN_SHIFT);
 		if (ret != 0)
 			dev_err(gpio->da9052->dev,
 				"Failed to updated gpio even reg,%d",
 				ret);
-		}
 	}
 }
 
@@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
 	.direction_input = da9052_gpio_direction_input,
 	.direction_output = da9052_gpio_direction_output,
 	.to_irq = da9052_gpio_to_irq,
-	.can_sleep = 1;
-	.ngpio = 16;
-	.base = -1;
+	.can_sleep = 1,
+	.ngpio = 16,
+	.base = -1,
 };
 
 static int __devinit da9052_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index ea8e73869250..461958fc2264 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
 			  &chip->reg->regs[chip->ch].imask);
 }
 
+static void ioh_irq_disable(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct ioh_gpio *chip = gc->private;
+	unsigned long flags;
+	u32 ien;
+
+	spin_lock_irqsave(&chip->spinlock, flags);
+	ien = ioread32(&chip->reg->regs[chip->ch].ien);
+	ien &= ~(1 << (d->irq - chip->irq_base));
+	iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+	spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct ioh_gpio *chip = gc->private;
+	unsigned long flags;
+	u32 ien;
+
+	spin_lock_irqsave(&chip->spinlock, flags);
+	ien = ioread32(&chip->reg->regs[chip->ch].ien);
+	ien |= 1 << (d->irq - chip->irq_base);
+	iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+	spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
 static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
 {
 	struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
 	int i, j;
 	int ret = IRQ_NONE;
 
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < 8; i++, chip++) {
 		reg_val = ioread32(&chip->reg->regs[i].istatus);
 		for (j = 0; j < num_ports[i]; j++) {
 			if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
 	ct->chip.irq_mask = ioh_irq_mask;
 	ct->chip.irq_unmask = ioh_irq_unmask;
 	ct->chip.irq_set_type = ioh_irq_type;
+	ct->chip.irq_disable = ioh_irq_disable;
+	ct->chip.irq_enable = ioh_irq_enable;
 
 	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
 			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index ec3fcf0a7e12..5cd04b65c556 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
 	return 0;
 }
 
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	/* GPIO 28..31 are input only on MPC5121 */
+	if (gpio >= 28)
+		return -EINVAL;
+
+	return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
 static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
 {
 	struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
 	mm_gc->save_regs = mpc8xxx_gpio_save_regs;
 	gc->ngpio = MPC8XXX_GPIO_PINS;
 	gc->direction_input = mpc8xxx_gpio_dir_in;
-	gc->direction_output = mpc8xxx_gpio_dir_out;
-	if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
-		gc->get = mpc8572_gpio_get;
-	else
-		gc->get = mpc8xxx_gpio_get;
+	gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+		mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+	gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+		mpc8572_gpio_get : mpc8xxx_gpio_get;
 	gc->set = mpc8xxx_gpio_set;
 	gc->to_irq = mpc8xxx_gpio_to_irq;
 
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0e49d87f6c60..0b0562979171 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -148,13 +148,17 @@ static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
 	return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
 }
 
-#define MOD_REG_BIT(reg, bit_mask, set)	\
-do {	\
-	int l = __raw_readl(base + reg); \
-	if (set) l |= bit_mask; \
-	else l &= ~bit_mask; \
-	__raw_writel(l, base + reg); \
-} while(0)
+static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
+{
+	int l = __raw_readl(base + reg);
+
+	if (set)
+		l |= mask;
+	else
+		l &= ~mask;
+
+	__raw_writel(l, base + reg);
+}
 
 /**
  * _set_gpio_debounce - low level gpio debounce time
@@ -210,28 +214,28 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
 	u32 gpio_bit = 1 << gpio;
 
 	if (cpu_is_omap44xx()) {
-		MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit,
+		_gpio_rmw(base, OMAP4_GPIO_LEVELDETECT0, gpio_bit,
 			trigger & IRQ_TYPE_LEVEL_LOW);
-		MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT1, gpio_bit,
+		_gpio_rmw(base, OMAP4_GPIO_LEVELDETECT1, gpio_bit,
 			trigger & IRQ_TYPE_LEVEL_HIGH);
-		MOD_REG_BIT(OMAP4_GPIO_RISINGDETECT, gpio_bit,
+		_gpio_rmw(base, OMAP4_GPIO_RISINGDETECT, gpio_bit,
 			trigger & IRQ_TYPE_EDGE_RISING);
-		MOD_REG_BIT(OMAP4_GPIO_FALLINGDETECT, gpio_bit,
+		_gpio_rmw(base, OMAP4_GPIO_FALLINGDETECT, gpio_bit,
 			trigger & IRQ_TYPE_EDGE_FALLING);
 	} else {
-		MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
+		_gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
 			trigger & IRQ_TYPE_LEVEL_LOW);
-		MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
+		_gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
 			trigger & IRQ_TYPE_LEVEL_HIGH);
-		MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
+		_gpio_rmw(base, OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
 			trigger & IRQ_TYPE_EDGE_RISING);
-		MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
+		_gpio_rmw(base, OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
 			trigger & IRQ_TYPE_EDGE_FALLING);
 	}
 	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
 		if (cpu_is_omap44xx()) {
-			MOD_REG_BIT(OMAP4_GPIO_IRQWAKEN0, gpio_bit,
+			_gpio_rmw(base, OMAP4_GPIO_IRQWAKEN0, gpio_bit,
 				trigger != 0);
 		} else {
 			/*
 			 * GPIO wakeup request can only be generated on edge
@@ -1086,6 +1090,11 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
 
 	gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
 				    handle_simple_irq);
+	if (!gc) {
+		dev_err(bank->dev, "Memory alloc failed for gc\n");
+		return;
+	}
+
 	ct = gc->chip_types;
 
 	/* NOTE: No ack required, reading IRQ status clears it. */
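Replacing the MOD_REG_BIT() macro with the _gpio_rmw() inline gains type checking and stops the helper from silently capturing a local "base" variable from the caller; the iomem base is now an explicit parameter. A call site therefore changes along these lines (register name taken from the hunk above):

	/* before: macro expands in place and assumes a local "base" */
	MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit, trigger & IRQ_TYPE_LEVEL_LOW);

	/* after: ordinary inline call with the base passed explicitly */
	_gpio_rmw(base, OMAP4_GPIO_LEVELDETECT0, gpio_bit, trigger & IRQ_TYPE_LEVEL_LOW);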
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 0550dcb85814..d3f3e8f54561 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -546,7 +546,7 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip)
  * Translate OpenFirmware node properties into platform_data
  * WARNING: This is DEPRECATED and will be removed eventually!
  */
-void
+static void
 pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
 {
 	struct device_node *node;
@@ -574,7 +574,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
 	*invert = *val;
 }
 #else
-void
+static void
 pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
 {
 	*gpio_base = -1;
@@ -596,9 +596,6 @@ static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert)
 
 	/* set platform specific polarity inversion */
 	ret = pca953x_write_reg(chip, PCA953X_INVERT, invert);
-	if (ret)
-		goto out;
-	return 0;
 out:
 	return ret;
 }
@@ -640,7 +637,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
 	struct pca953x_platform_data *pdata;
 	struct pca953x_chip *chip;
 	int irq_base=0, invert=0;
-	int ret = 0;
+	int ret;
 
 	chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
 	if (chip == NULL)
@@ -673,10 +670,10 @@ static int __devinit pca953x_probe(struct i2c_client *client,
 	pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
 
 	if (chip->chip_type == PCA953X_TYPE)
-		device_pca953x_init(chip, invert);
-	else if (chip->chip_type == PCA957X_TYPE)
-		device_pca957x_init(chip, invert);
+		ret = device_pca953x_init(chip, invert);
 	else
+		ret = device_pca957x_init(chip, invert);
+	if (ret)
 		goto out_failed;
 
 	ret = pca953x_irq_setup(chip, id, irq_base);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 093c90bd3c1d..4102f63230fd 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -238,10 +238,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
 	int ret, irq, i;
 	static DECLARE_BITMAP(init_irq, NR_IRQS);
 
-	pdata = dev->dev.platform_data;
-	if (pdata == NULL)
-		return -ENODEV;
-
 	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
 	if (chip == NULL)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 45079046b6d5..2418429a9836 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,7 +9,6 @@ menuconfig DRM
 	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
 	select I2C
 	select I2C_ALGOBIT
-	select SLOW_WORK
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
@@ -96,6 +95,7 @@ config DRM_I915
 	select FB_CFB_IMAGEBLIT
 	# i915 depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select BACKLIGHT_LCD_SUPPORT if ACPI
 	select BACKLIGHT_CLASS_DEVICE if ACPI
 	select VIDEO_OUTPUT_CONTROL if ACPI
 	select INPUT if ACPI
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 0d1faa72e1ff..f259a2563204 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2340,6 +2340,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 	}
 
 	if (num_clips && clips_ptr) {
+		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+			ret = -EINVAL;
+			goto out_err1;
+		}
 		clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
 		if (!clips) {
 			ret = -ENOMEM;
@@ -2585,8 +2589,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
 	property->num_values = num_values;
 	INIT_LIST_HEAD(&property->enum_blob_list);
 
-	if (name)
+	if (name) {
 		strncpy(property->name, name, DRM_PROP_NAME_LEN);
+		property->name[DRM_PROP_NAME_LEN-1] = '\0';
+	}
 
 	list_add_tail(&property->head, &dev->mode_config.property_list);
 	return property;
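Both hunks harden userspace-controlled input: the clip count is range-checked before it feeds the multiplication in the allocation size, and the strncpy() result is explicitly NUL-terminated, since strncpy() does not guarantee termination when the source fills the buffer. A generic sketch of the bounded-allocation half of that pattern (the limit and names are illustrative, not DRM API):

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/errno.h>

#define EXAMPLE_MAX_ITEMS 256		/* hypothetical upper bound */

static void *alloc_user_items(unsigned int num_items, size_t item_size)
{
	/* reject counts that could overflow num_items * item_size */
	if (num_items == 0 || num_items > EXAMPLE_MAX_ITEMS)
		return ERR_PTR(-EINVAL);

	return kzalloc(num_items * item_size, GFP_KERNEL);
}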
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index ccbdc0b5854c..42f86e71479a 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -457,6 +457,30 @@ done:
 EXPORT_SYMBOL(drm_crtc_helper_set_mode);
 
 
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	/* Decouple all encoders and their attached connectors from this crtc */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			if (connector->encoder != encoder)
+				continue;
+
+			connector->encoder = NULL;
+		}
+	}
+
+	drm_helper_disable_unused_functions(dev);
+	return 0;
+}
+
 /**
  * drm_crtc_helper_set_config - set a new config from userspace
  * @crtc: CRTC to setup
@@ -485,6 +509,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	struct drm_connector *save_connectors, *connector;
 	int count = 0, ro, fail = 0;
 	struct drm_crtc_helper_funcs *crtc_funcs;
+	struct drm_mode_set save_set;
 	int ret = 0;
 	int i;
 
@@ -510,8 +535,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 			  (int)set->num_connectors, set->x, set->y);
 	} else {
 		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
-		set->mode = NULL;
-		set->num_connectors = 0;
+		return drm_crtc_helper_disable(set->crtc);
 	}
 
 	dev = set->crtc->dev;
@@ -557,6 +581,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		save_connectors[count++] = *connector;
 	}
 
+	save_set.crtc = set->crtc;
+	save_set.mode = &set->crtc->mode;
+	save_set.x = set->crtc->x;
+	save_set.y = set->crtc->y;
+	save_set.fb = set->crtc->fb;
+
 	/* We should be able to check here if the fb has the same properties
 	 * and then just flip_or_move it */
 	if (set->crtc->fb != set->fb) {
@@ -722,6 +752,12 @@ fail:
 		*connector = save_connectors[count++];
 	}
 
+	/* Try to restore the config */
+	if (mode_changed &&
+	    !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
+				      save_set.y, save_set.fb))
+		DRM_ERROR("failed to restore config after modeset failure\n");
+
 	kfree(save_connectors);
 	kfree(save_encoders);
 	kfree(save_crtcs);
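The new save_set snapshot lets the failure path reprogram the CRTC with the mode, offsets and framebuffer that were active before the attempted modeset, rather than leaving the hardware half-configured. Reduced to a sketch, the control flow is: snapshot the current state up front, attempt the new configuration, and on error replay the snapshot:

	struct drm_mode_set save_set;

	/* snapshot the currently programmed state */
	save_set.crtc = set->crtc;
	save_set.mode = &set->crtc->mode;
	save_set.x = set->crtc->x;
	save_set.y = set->crtc->y;
	save_set.fb = set->crtc->fb;

	/* ... attempt the new configuration; on any error jump to fail: ... */

fail:
	/* put the old configuration back before returning the error */
	if (mode_changed &&
	    !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode,
				      save_set.x, save_set.y, save_set.fb))
		DRM_ERROR("failed to restore config after modeset failure\n");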
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index d067c12ba940..1c7a1c0d3edd 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -118,7 +118,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
 		tmp->minor = minor;
 		tmp->dent = ent;
 		tmp->info_ent = &files[i];
-		list_add(&(tmp->list), &(minor->debugfs_nodes.list));
+
+		mutex_lock(&minor->debugfs_lock);
+		list_add(&tmp->list, &minor->debugfs_list);
+		mutex_unlock(&minor->debugfs_lock);
 	}
 	return 0;
 
@@ -146,7 +149,8 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
 	char name[64];
 	int ret;
 
-	INIT_LIST_HEAD(&minor->debugfs_nodes.list);
+	INIT_LIST_HEAD(&minor->debugfs_list);
+	mutex_init(&minor->debugfs_lock);
 	sprintf(name, "%d", minor_id);
 	minor->debugfs_root = debugfs_create_dir(name, root);
 	if (!minor->debugfs_root) {
@@ -192,8 +196,9 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
 	struct drm_info_node *tmp;
 	int i;
 
+	mutex_lock(&minor->debugfs_lock);
 	for (i = 0; i < count; i++) {
-		list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
+		list_for_each_safe(pos, q, &minor->debugfs_list) {
 			tmp = list_entry(pos, struct drm_info_node, list);
 			if (tmp->info_ent == &files[i]) {
 				debugfs_remove(tmp->dent);
@@ -202,6 +207,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
 			}
 		}
 	}
+	mutex_unlock(&minor->debugfs_lock);
 	return 0;
 }
 EXPORT_SYMBOL(drm_debugfs_remove_files);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index eaf25ffd9a46..bc5febe45762 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -125,7 +125,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
 
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index cb3794a00f98..44a5d0ad8b7c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -110,10 +110,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	/* Prevent vblank irq processing while disabling vblank irqs,
 	 * so no updates of timestamps or count can happen after we've
 	 * disabled. Needed to prevent races in case of delayed irq's.
-	 * Disable preemption, so vblank_time_lock is held as short as
-	 * possible, even under a kernel with PREEMPT_RT patches.
 	 */
-	preempt_disable();
 	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
 	dev->driver->disable_vblank(dev, crtc);
@@ -164,7 +161,6 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	clear_vblank_timestamps(dev, crtc);
 
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
-	preempt_enable();
 }
 
 static void vblank_disable_fn(unsigned long arg)
@@ -407,13 +403,16 @@ int drm_irq_uninstall(struct drm_device *dev)
 	/*
 	 * Wake up any waiters so they don't hang.
 	 */
-	spin_lock_irqsave(&dev->vbl_lock, irqflags);
-	for (i = 0; i < dev->num_crtcs; i++) {
-		DRM_WAKEUP(&dev->vbl_queue[i]);
-		dev->vblank_enabled[i] = 0;
-		dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
+	if (dev->num_crtcs) {
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+		for (i = 0; i < dev->num_crtcs; i++) {
+			DRM_WAKEUP(&dev->vbl_queue[i]);
+			dev->vblank_enabled[i] = 0;
+			dev->last_vblank[i] =
+				dev->driver->get_vblank_counter(dev, i);
+		}
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 	}
-	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
 	if (!irq_enabled)
 		return -EINVAL;
@@ -886,10 +885,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
 	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
-		/* Disable preemption while holding vblank_time_lock. Do
-		 * it explicitely to guard against PREEMPT_RT kernel.
-		 */
-		preempt_disable();
 		spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
 		if (!dev->vblank_enabled[crtc]) {
 			/* Enable vblank irqs under vblank_time_lock protection.
@@ -909,7 +904,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
 			}
 		}
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
-		preempt_enable();
 	} else {
 		if (!dev->vblank_enabled[crtc]) {
 			atomic_dec(&dev->vblank_refcount[crtc]);
@@ -1125,6 +1119,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 		trace_drm_vblank_event_delivered(current->pid, pipe,
 						 vblwait->request.sequence);
 	} else {
+		/* drm_handle_vblank_events will call drm_vblank_put */
 		list_add_tail(&e->base.link, &dev->vblank_event_list);
 		vblwait->reply.sequence = vblwait->request.sequence;
 	}
@@ -1205,8 +1200,12 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 		goto done;
 	}
 
-	if (flags & _DRM_VBLANK_EVENT)
+	if (flags & _DRM_VBLANK_EVENT) {
+		/* must hold on to the vblank ref until the event fires
+		 * drm_vblank_put will be called asynchronously
+		 */
 		return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+	}
 
 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
 	    (seq - vblwait->request.sequence) <= (1<<23)) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 6f8afea94fc9..2bb07bca511a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -27,82 +27,84 @@
27#include "drm.h" 27#include "drm.h"
28 28
29#include "exynos_drm_drv.h" 29#include "exynos_drm_drv.h"
30#include "exynos_drm_gem.h"
30#include "exynos_drm_buf.h" 31#include "exynos_drm_buf.h"
31 32
32static DEFINE_MUTEX(exynos_drm_buf_lock);
33
34static int lowlevel_buffer_allocate(struct drm_device *dev, 33static int lowlevel_buffer_allocate(struct drm_device *dev,
35 struct exynos_drm_buf_entry *entry) 34 struct exynos_drm_gem_buf *buffer)
36{ 35{
37 DRM_DEBUG_KMS("%s\n", __FILE__); 36 DRM_DEBUG_KMS("%s\n", __FILE__);
38 37
39 entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size, 38 buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
40 (dma_addr_t *)&entry->paddr, GFP_KERNEL); 39 &buffer->dma_addr, GFP_KERNEL);
41 if (!entry->paddr) { 40 if (!buffer->kvaddr) {
42 DRM_ERROR("failed to allocate buffer.\n"); 41 DRM_ERROR("failed to allocate buffer.\n");
43 return -ENOMEM; 42 return -ENOMEM;
44 } 43 }
45 44
46 DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n", 45 DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
47 (unsigned int)entry->vaddr, entry->paddr, entry->size); 46 (unsigned long)buffer->kvaddr,
47 (unsigned long)buffer->dma_addr,
48 buffer->size);
48 49
49 return 0; 50 return 0;
50} 51}
51 52
52static void lowlevel_buffer_deallocate(struct drm_device *dev, 53static void lowlevel_buffer_deallocate(struct drm_device *dev,
53 struct exynos_drm_buf_entry *entry) 54 struct exynos_drm_gem_buf *buffer)
54{ 55{
55 DRM_DEBUG_KMS("%s.\n", __FILE__); 56 DRM_DEBUG_KMS("%s.\n", __FILE__);
56 57
57 if (entry->paddr && entry->vaddr && entry->size) 58 if (buffer->dma_addr && buffer->size)
58 dma_free_writecombine(dev->dev, entry->size, entry->vaddr, 59 dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
59 entry->paddr); 60 (dma_addr_t)buffer->dma_addr);
60 else 61 else
61 DRM_DEBUG_KMS("entry data is null.\n"); 62 DRM_DEBUG_KMS("buffer data are invalid.\n");
62} 63}
63 64
64struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev, 65struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
65 unsigned int size) 66 unsigned int size)
66{ 67{
67 struct exynos_drm_buf_entry *entry; 68 struct exynos_drm_gem_buf *buffer;
68 69
69 DRM_DEBUG_KMS("%s.\n", __FILE__); 70 DRM_DEBUG_KMS("%s.\n", __FILE__);
71 DRM_DEBUG_KMS("desired size = 0x%x\n", size);
70 72
71 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 73 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
72 if (!entry) { 74 if (!buffer) {
73 DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n"); 75 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
74 return ERR_PTR(-ENOMEM); 76 return ERR_PTR(-ENOMEM);
75 } 77 }
76 78
77 entry->size = size; 79 buffer->size = size;
78 80
79 /* 81 /*
80 * allocate memory region with size and set the memory information 82 * allocate memory region with size and set the memory information
81 * to vaddr and paddr of a entry object. 83 * to vaddr and dma_addr of a buffer object.
82 */ 84 */
83 if (lowlevel_buffer_allocate(dev, entry) < 0) { 85 if (lowlevel_buffer_allocate(dev, buffer) < 0) {
84 kfree(entry); 86 kfree(buffer);
85 entry = NULL; 87 buffer = NULL;
86 return ERR_PTR(-ENOMEM); 88 return ERR_PTR(-ENOMEM);
87 } 89 }
88 90
89 return entry; 91 return buffer;
90} 92}
91 93
92void exynos_drm_buf_destroy(struct drm_device *dev, 94void exynos_drm_buf_destroy(struct drm_device *dev,
93 struct exynos_drm_buf_entry *entry) 95 struct exynos_drm_gem_buf *buffer)
94{ 96{
95 DRM_DEBUG_KMS("%s.\n", __FILE__); 97 DRM_DEBUG_KMS("%s.\n", __FILE__);
96 98
97 if (!entry) { 99 if (!buffer) {
98 DRM_DEBUG_KMS("entry is null.\n"); 100 DRM_DEBUG_KMS("buffer is null.\n");
99 return; 101 return;
100 } 102 }
101 103
102 lowlevel_buffer_deallocate(dev, entry); 104 lowlevel_buffer_deallocate(dev, buffer);
103 105
104 kfree(entry); 106 kfree(buffer);
105 entry = NULL; 107 buffer = NULL;
106} 108}
107 109
108MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); 110MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 045d59eab01a..6e91f9caa5db 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -26,28 +26,15 @@
26#ifndef _EXYNOS_DRM_BUF_H_ 26#ifndef _EXYNOS_DRM_BUF_H_
27#define _EXYNOS_DRM_BUF_H_ 27#define _EXYNOS_DRM_BUF_H_
28 28
29/*
30 * exynos drm buffer entry structure.
31 *
32 * @paddr: physical address of allocated memory.
33 * @vaddr: kernel virtual address of allocated memory.
34 * @size: size of allocated memory.
35 */
36struct exynos_drm_buf_entry {
37 dma_addr_t paddr;
38 void __iomem *vaddr;
39 unsigned int size;
40};
41
42/* allocate physical memory. */ 29/* allocate physical memory. */
43struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev, 30struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
44 unsigned int size); 31 unsigned int size);
45 32
46/* get physical memory information of a drm framebuffer. */ 33/* get memory information of a drm framebuffer. */
47struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb); 34struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
48 35
49/* remove allocated physical memory. */ 36/* remove allocated physical memory. */
50void exynos_drm_buf_destroy(struct drm_device *dev, 37void exynos_drm_buf_destroy(struct drm_device *dev,
51 struct exynos_drm_buf_entry *entry); 38 struct exynos_drm_gem_buf *buffer);
52 39
53#endif 40#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 985d9e768728..d620b0784257 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -37,6 +37,8 @@
37 37
38struct exynos_drm_connector { 38struct exynos_drm_connector {
39 struct drm_connector drm_connector; 39 struct drm_connector drm_connector;
40 uint32_t encoder_id;
41 struct exynos_drm_manager *manager;
40}; 42};
41 43
42/* convert exynos_video_timings to drm_display_mode */ 44/* convert exynos_video_timings to drm_display_mode */
@@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode,
47 DRM_DEBUG_KMS("%s\n", __FILE__); 49 DRM_DEBUG_KMS("%s\n", __FILE__);
48 50
49 mode->clock = timing->pixclock / 1000; 51 mode->clock = timing->pixclock / 1000;
52 mode->vrefresh = timing->refresh;
50 53
51 mode->hdisplay = timing->xres; 54 mode->hdisplay = timing->xres;
52 mode->hsync_start = mode->hdisplay + timing->left_margin; 55 mode->hsync_start = mode->hdisplay + timing->left_margin;
@@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode,
57 mode->vsync_start = mode->vdisplay + timing->upper_margin; 60 mode->vsync_start = mode->vdisplay + timing->upper_margin;
58 mode->vsync_end = mode->vsync_start + timing->vsync_len; 61 mode->vsync_end = mode->vsync_start + timing->vsync_len;
59 mode->vtotal = mode->vsync_end + timing->lower_margin; 62 mode->vtotal = mode->vsync_end + timing->lower_margin;
63
64 if (timing->vmode & FB_VMODE_INTERLACED)
65 mode->flags |= DRM_MODE_FLAG_INTERLACE;
66
67 if (timing->vmode & FB_VMODE_DOUBLE)
68 mode->flags |= DRM_MODE_FLAG_DBLSCAN;
60} 69}
61 70
62/* convert drm_display_mode to exynos_video_timings */ 71/* convert drm_display_mode to exynos_video_timings */
@@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing,
69 memset(timing, 0, sizeof(*timing)); 78 memset(timing, 0, sizeof(*timing));
70 79
71 timing->pixclock = mode->clock * 1000; 80 timing->pixclock = mode->clock * 1000;
72 timing->refresh = mode->vrefresh; 81 timing->refresh = drm_mode_vrefresh(mode);
73 82
74 timing->xres = mode->hdisplay; 83 timing->xres = mode->hdisplay;
75 timing->left_margin = mode->hsync_start - mode->hdisplay; 84 timing->left_margin = mode->hsync_start - mode->hdisplay;
@@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing,
92 101
93static int exynos_drm_connector_get_modes(struct drm_connector *connector) 102static int exynos_drm_connector_get_modes(struct drm_connector *connector)
94{ 103{
95 struct exynos_drm_manager *manager = 104 struct exynos_drm_connector *exynos_connector =
96 exynos_drm_get_manager(connector->encoder); 105 to_exynos_connector(connector);
97 struct exynos_drm_display *display = manager->display; 106 struct exynos_drm_manager *manager = exynos_connector->manager;
107 struct exynos_drm_display_ops *display_ops = manager->display_ops;
98 unsigned int count; 108 unsigned int count;
99 109
100 DRM_DEBUG_KMS("%s\n", __FILE__); 110 DRM_DEBUG_KMS("%s\n", __FILE__);
101 111
102 if (!display) { 112 if (!display_ops) {
103 DRM_DEBUG_KMS("display is null.\n"); 113 DRM_DEBUG_KMS("display_ops is null.\n");
104 return 0; 114 return 0;
105 } 115 }
106 116
@@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
112 * P.S. in case of lcd panel, count is always 1 if success 122 * P.S. in case of lcd panel, count is always 1 if success
113 * because lcd panel has only one mode. 123 * because lcd panel has only one mode.
114 */ 124 */
115 if (display->get_edid) { 125 if (display_ops->get_edid) {
116 int ret; 126 int ret;
117 void *edid; 127 void *edid;
118 128
@@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
122 return 0; 132 return 0;
123 } 133 }
124 134
125 ret = display->get_edid(manager->dev, connector, 135 ret = display_ops->get_edid(manager->dev, connector,
126 edid, MAX_EDID); 136 edid, MAX_EDID);
127 if (ret < 0) { 137 if (ret < 0) {
128 DRM_ERROR("failed to get edid data.\n"); 138 DRM_ERROR("failed to get edid data.\n");
@@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
140 struct drm_display_mode *mode = drm_mode_create(connector->dev); 150 struct drm_display_mode *mode = drm_mode_create(connector->dev);
141 struct fb_videomode *timing; 151 struct fb_videomode *timing;
142 152
143 if (display->get_timing) 153 if (display_ops->get_timing)
144 timing = display->get_timing(manager->dev); 154 timing = display_ops->get_timing(manager->dev);
145 else { 155 else {
146 drm_mode_destroy(connector->dev, mode); 156 drm_mode_destroy(connector->dev, mode);
147 return 0; 157 return 0;
@@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
162static int exynos_drm_connector_mode_valid(struct drm_connector *connector, 172static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
163 struct drm_display_mode *mode) 173 struct drm_display_mode *mode)
164{ 174{
165 struct exynos_drm_manager *manager = 175 struct exynos_drm_connector *exynos_connector =
166 exynos_drm_get_manager(connector->encoder); 176 to_exynos_connector(connector);
167 struct exynos_drm_display *display = manager->display; 177 struct exynos_drm_manager *manager = exynos_connector->manager;
178 struct exynos_drm_display_ops *display_ops = manager->display_ops;
168 struct fb_videomode timing; 179 struct fb_videomode timing;
169 int ret = MODE_BAD; 180 int ret = MODE_BAD;
170 181
@@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
172 183
173 convert_to_video_timing(&timing, mode); 184 convert_to_video_timing(&timing, mode);
174 185
175 if (display && display->check_timing) 186 if (display_ops && display_ops->check_timing)
176 if (!display->check_timing(manager->dev, (void *)&timing)) 187 if (!display_ops->check_timing(manager->dev, (void *)&timing))
177 ret = MODE_OK; 188 ret = MODE_OK;
178 189
179 return ret; 190 return ret;
@@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
181 192
182struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector) 193struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
183{ 194{
195 struct drm_device *dev = connector->dev;
196 struct exynos_drm_connector *exynos_connector =
197 to_exynos_connector(connector);
198 struct drm_mode_object *obj;
199 struct drm_encoder *encoder;
200
184 DRM_DEBUG_KMS("%s\n", __FILE__); 201 DRM_DEBUG_KMS("%s\n", __FILE__);
185 202
186 return connector->encoder; 203 obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
204 DRM_MODE_OBJECT_ENCODER);
205 if (!obj) {
206 DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
207 exynos_connector->encoder_id);
208 return NULL;
209 }
210
211 encoder = obj_to_encoder(obj);
212
213 return encoder;
187} 214}
188 215
189static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { 216static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
196static enum drm_connector_status 223static enum drm_connector_status
197exynos_drm_connector_detect(struct drm_connector *connector, bool force) 224exynos_drm_connector_detect(struct drm_connector *connector, bool force)
198{ 225{
199 struct exynos_drm_manager *manager = 226 struct exynos_drm_connector *exynos_connector =
200 exynos_drm_get_manager(connector->encoder); 227 to_exynos_connector(connector);
201 struct exynos_drm_display *display = manager->display; 228 struct exynos_drm_manager *manager = exynos_connector->manager;
229 struct exynos_drm_display_ops *display_ops =
230 manager->display_ops;
202 enum drm_connector_status status = connector_status_disconnected; 231 enum drm_connector_status status = connector_status_disconnected;
203 232
204 DRM_DEBUG_KMS("%s\n", __FILE__); 233 DRM_DEBUG_KMS("%s\n", __FILE__);
205 234
206 if (display && display->is_connected) { 235 if (display_ops && display_ops->is_connected) {
207 if (display->is_connected(manager->dev)) 236 if (display_ops->is_connected(manager->dev))
208 status = connector_status_connected; 237 status = connector_status_connected;
209 else 238 else
210 status = connector_status_disconnected; 239 status = connector_status_disconnected;
@@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
251 280
252 connector = &exynos_connector->drm_connector; 281 connector = &exynos_connector->drm_connector;
253 282
254 switch (manager->display->type) { 283 switch (manager->display_ops->type) {
255 case EXYNOS_DISPLAY_TYPE_HDMI: 284 case EXYNOS_DISPLAY_TYPE_HDMI:
256 type = DRM_MODE_CONNECTOR_HDMIA; 285 type = DRM_MODE_CONNECTOR_HDMIA;
286 connector->interlace_allowed = true;
287 connector->polled = DRM_CONNECTOR_POLL_HPD;
257 break; 288 break;
258 default: 289 default:
259 type = DRM_MODE_CONNECTOR_Unknown; 290 type = DRM_MODE_CONNECTOR_Unknown;
@@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
267 if (err) 298 if (err)
268 goto err_connector; 299 goto err_connector;
269 300
301 exynos_connector->encoder_id = encoder->base.id;
302 exynos_connector->manager = manager;
270 connector->encoder = encoder; 303 connector->encoder = encoder;
304
271 err = drm_mode_connector_attach_encoder(connector, encoder); 305 err = drm_mode_connector_attach_encoder(connector, encoder);
272 if (err) { 306 if (err) {
273 DRM_ERROR("failed to attach a connector to a encoder\n"); 307 DRM_ERROR("failed to attach a connector to a encoder\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 73893e5068a4..7777d41d1cda 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -29,36 +29,17 @@
29#include "drmP.h" 29#include "drmP.h"
30#include "drm_crtc_helper.h" 30#include "drm_crtc_helper.h"
31 31
32#include "exynos_drm_crtc.h"
32#include "exynos_drm_drv.h" 33#include "exynos_drm_drv.h"
33#include "exynos_drm_fb.h" 34#include "exynos_drm_fb.h"
34#include "exynos_drm_encoder.h" 35#include "exynos_drm_encoder.h"
36#include "exynos_drm_gem.h"
35#include "exynos_drm_buf.h" 37#include "exynos_drm_buf.h"
36 38
37#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\ 39#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
38 drm_crtc) 40 drm_crtc)
39 41
40/* 42/*
41 * Exynos specific crtc postion structure.
42 *
43 * @fb_x: offset x on a framebuffer to be displyed
44 * - the unit is screen coordinates.
45 * @fb_y: offset y on a framebuffer to be displayed
46 * - the unit is screen coordinates.
47 * @crtc_x: offset x on hardware screen.
48 * @crtc_y: offset y on hardware screen.
49 * @crtc_w: width of hardware screen.
50 * @crtc_h: height of hardware screen.
51 */
52struct exynos_drm_crtc_pos {
53 unsigned int fb_x;
54 unsigned int fb_y;
55 unsigned int crtc_x;
56 unsigned int crtc_y;
57 unsigned int crtc_w;
58 unsigned int crtc_h;
59};
60
61/*
62 * Exynos specific crtc structure. 43 * Exynos specific crtc structure.
63 * 44 *
64 * @drm_crtc: crtc object. 45 * @drm_crtc: crtc object.
@@ -85,30 +66,31 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
85 66
86 exynos_drm_fn_encoder(crtc, overlay, 67 exynos_drm_fn_encoder(crtc, overlay,
87 exynos_drm_encoder_crtc_mode_set); 68 exynos_drm_encoder_crtc_mode_set);
88 exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit); 69 exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
70 exynos_drm_encoder_crtc_commit);
89} 71}
90 72
91static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, 73int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
92 struct drm_framebuffer *fb, 74 struct drm_framebuffer *fb,
93 struct drm_display_mode *mode, 75 struct drm_display_mode *mode,
94 struct exynos_drm_crtc_pos *pos) 76 struct exynos_drm_crtc_pos *pos)
95{ 77{
96 struct exynos_drm_buf_entry *entry; 78 struct exynos_drm_gem_buf *buffer;
97 unsigned int actual_w; 79 unsigned int actual_w;
98 unsigned int actual_h; 80 unsigned int actual_h;
99 81
100 entry = exynos_drm_fb_get_buf(fb); 82 buffer = exynos_drm_fb_get_buf(fb);
101 if (!entry) { 83 if (!buffer) {
102 DRM_LOG_KMS("entry is null.\n"); 84 DRM_LOG_KMS("buffer is null.\n");
103 return -EFAULT; 85 return -EFAULT;
104 } 86 }
105 87
106 overlay->paddr = entry->paddr; 88 overlay->dma_addr = buffer->dma_addr;
107 overlay->vaddr = entry->vaddr; 89 overlay->vaddr = buffer->kvaddr;
108 90
109 DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", 91 DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
110 (unsigned long)overlay->vaddr, 92 (unsigned long)overlay->vaddr,
111 (unsigned long)overlay->paddr); 93 (unsigned long)overlay->dma_addr);
112 94
113 actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w); 95 actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
114 actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h); 96 actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -171,9 +153,26 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
171 153
172static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) 154static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
173{ 155{
174 DRM_DEBUG_KMS("%s\n", __FILE__); 156 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
175 157
176 /* TODO */ 158 DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
159
160 switch (mode) {
161 case DRM_MODE_DPMS_ON:
162 exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
163 exynos_drm_encoder_crtc_commit);
164 break;
165 case DRM_MODE_DPMS_STANDBY:
166 case DRM_MODE_DPMS_SUSPEND:
167 case DRM_MODE_DPMS_OFF:
168 /* TODO */
169 exynos_drm_fn_encoder(crtc, NULL,
170 exynos_drm_encoder_crtc_disable);
171 break;
172 default:
173 DRM_DEBUG_KMS("unspecified mode %d\n", mode);
174 break;
175 }
177} 176}
178 177
179static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) 178static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -185,9 +184,12 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
185 184
186static void exynos_drm_crtc_commit(struct drm_crtc *crtc) 185static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
187{ 186{
187 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
188
188 DRM_DEBUG_KMS("%s\n", __FILE__); 189 DRM_DEBUG_KMS("%s\n", __FILE__);
189 190
190 /* drm framework doesn't check NULL. */ 191 exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
192 exynos_drm_encoder_crtc_commit);
191} 193}
192 194
193static bool 195static bool
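
The to_exynos_crtc() macro at the top of this file recovers the driver-private exynos_drm_crtc from the embedded drm_crtc with container_of(). A minimal user-space sketch of the same pattern, using made-up struct names rather than the driver's real types:

    #include <stddef.h>
    #include <stdio.h>

    /* stand-ins for the base object and the wrapper that embeds it */
    struct base_crtc { int id; };
    struct wrapper_crtc { int pipe; struct base_crtc drm_crtc; };

    /* same idea as the kernel's container_of(ptr, type, member) */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
            struct wrapper_crtc w = { .pipe = 1 };
            struct base_crtc *base = &w.drm_crtc;   /* what the DRM core hands back */
            struct wrapper_crtc *back =
                    container_of(base, struct wrapper_crtc, drm_crtc);

            printf("pipe = %d\n", back->pipe);      /* prints 1 */
            return 0;
    }

Subtracting offsetof() is what lets the wrapper be recovered from a pointer to one of its members, which is how the helpers in this file reach exynos_crtc->pipe.
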
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index c584042d6d2c..25f72a62cb88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
35int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); 35int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
36void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); 36void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
37 37
38/*
39 * Exynos specific crtc position structure.
40 *
41 * @fb_x: offset x on a framebuffer to be displayed
42 * - the unit is screen coordinates.
43 * @fb_y: offset y on a framebuffer to be displayed
44 * - the unit is screen coordinates.
45 * @crtc_x: offset x on hardware screen.
46 * @crtc_y: offset y on hardware screen.
47 * @crtc_w: width of hardware screen.
48 * @crtc_h: height of hardware screen.
49 */
50struct exynos_drm_crtc_pos {
51 unsigned int fb_x;
52 unsigned int fb_y;
53 unsigned int crtc_x;
54 unsigned int crtc_y;
55 unsigned int crtc_w;
56 unsigned int crtc_h;
57};
58
59int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
60 struct drm_framebuffer *fb,
61 struct drm_display_mode *mode,
62 struct exynos_drm_crtc_pos *pos);
38#endif 63#endif
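
The exynos_drm_crtc_pos fields exported here are consumed by exynos_drm_overlay_update(), which clips the requested overlay rectangle against the current display mode. A small, self-contained sketch of that clipping arithmetic (the struct and the mode/overlay numbers are illustrative only):

    #include <stdio.h>

    struct pos { unsigned int crtc_x, crtc_y, crtc_w, crtc_h; };

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            unsigned int hdisplay = 1280, vdisplay = 720;        /* mode size */
            struct pos p = { .crtc_x = 1000, .crtc_y = 600,
                             .crtc_w = 400,  .crtc_h = 300 };    /* requested */

            /* never let the overlay run past the right/bottom edge of the mode */
            unsigned int actual_w = MIN(hdisplay - p.crtc_x, p.crtc_w);
            unsigned int actual_h = MIN(vdisplay - p.crtc_y, p.crtc_h);

            printf("visible %ux%u\n", actual_w, actual_h);       /* 280x120 */
            return 0;
    }
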
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 83810cbe3c17..53e2216de61d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -27,6 +27,7 @@
27 27
28#include "drmP.h" 28#include "drmP.h"
29#include "drm.h" 29#include "drm.h"
30#include "drm_crtc_helper.h"
30 31
31#include <drm/exynos_drm.h> 32#include <drm/exynos_drm.h>
32 33
@@ -61,6 +62,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
61 62
62 drm_mode_config_init(dev); 63 drm_mode_config_init(dev);
63 64
65 /* init kms poll for handling hpd */
66 drm_kms_helper_poll_init(dev);
67
64 exynos_drm_mode_config_init(dev); 68 exynos_drm_mode_config_init(dev);
65 69
66 /* 70 /*
@@ -116,6 +120,7 @@ static int exynos_drm_unload(struct drm_device *dev)
116 exynos_drm_fbdev_fini(dev); 120 exynos_drm_fbdev_fini(dev);
117 exynos_drm_device_unregister(dev); 121 exynos_drm_device_unregister(dev);
118 drm_vblank_cleanup(dev); 122 drm_vblank_cleanup(dev);
123 drm_kms_helper_poll_fini(dev);
119 drm_mode_config_cleanup(dev); 124 drm_mode_config_cleanup(dev);
120 kfree(dev->dev_private); 125 kfree(dev->dev_private);
121 126
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c03683f2ae72..5e02e6ecc2e0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -29,6 +29,7 @@
29#ifndef _EXYNOS_DRM_DRV_H_ 29#ifndef _EXYNOS_DRM_DRV_H_
30#define _EXYNOS_DRM_DRV_H_ 30#define _EXYNOS_DRM_DRV_H_
31 31
32#include <linux/module.h>
32#include "drm.h" 33#include "drm.h"
33 34
34#define MAX_CRTC 2 35#define MAX_CRTC 2
@@ -79,8 +80,8 @@ struct exynos_drm_overlay_ops {
79 * @scan_flag: interlace or progressive way. 80 * @scan_flag: interlace or progressive way.
80 * (it could be DRM_MODE_FLAG_*) 81 * (it could be DRM_MODE_FLAG_*)
81 * @bpp: pixel size.(in bit) 82 * @bpp: pixel size.(in bit)
82 * @paddr: bus(accessed by dma) physical memory address to this overlay 83 * @dma_addr: bus(accessed by dma) address to the memory region allocated
83 * and this is physically continuous. 84 * for an overlay.
84 * @vaddr: virtual memory address to this overlay. 85 * @vaddr: virtual memory address to this overlay.
85 * @default_win: a window to be enabled. 86 * @default_win: a window to be enabled.
86 * @color_key: color key on or off. 87 * @color_key: color key on or off.
@@ -108,7 +109,7 @@ struct exynos_drm_overlay {
108 unsigned int scan_flag; 109 unsigned int scan_flag;
109 unsigned int bpp; 110 unsigned int bpp;
110 unsigned int pitch; 111 unsigned int pitch;
111 dma_addr_t paddr; 112 dma_addr_t dma_addr;
112 void __iomem *vaddr; 113 void __iomem *vaddr;
113 114
114 bool default_win; 115 bool default_win;
@@ -130,7 +131,7 @@ struct exynos_drm_overlay {
130 * @check_timing: check if timing is valid or not. 131 * @check_timing: check if timing is valid or not.
131 * @power_on: display device on or off. 132 * @power_on: display device on or off.
132 */ 133 */
133struct exynos_drm_display { 134struct exynos_drm_display_ops {
134 enum exynos_drm_output_type type; 135 enum exynos_drm_output_type type;
135 bool (*is_connected)(struct device *dev); 136 bool (*is_connected)(struct device *dev);
136 int (*get_edid)(struct device *dev, struct drm_connector *connector, 137 int (*get_edid)(struct device *dev, struct drm_connector *connector,
@@ -146,12 +147,14 @@ struct exynos_drm_display {
146 * @mode_set: convert drm_display_mode to hw specific display mode and 147 * @mode_set: convert drm_display_mode to hw specific display mode and
147 * would be called by encoder->mode_set(). 148 * would be called by encoder->mode_set().
148 * @commit: set current hw specific display mode to hw. 149 * @commit: set current hw specific display mode to hw.
150 * @disable: disable hardware specific display mode.
149 * @enable_vblank: specific driver callback for enabling vblank interrupt. 151 * @enable_vblank: specific driver callback for enabling vblank interrupt.
150 * @disable_vblank: specific driver callback for disabling vblank interrupt. 152 * @disable_vblank: specific driver callback for disabling vblank interrupt.
151 */ 153 */
152struct exynos_drm_manager_ops { 154struct exynos_drm_manager_ops {
153 void (*mode_set)(struct device *subdrv_dev, void *mode); 155 void (*mode_set)(struct device *subdrv_dev, void *mode);
154 void (*commit)(struct device *subdrv_dev); 156 void (*commit)(struct device *subdrv_dev);
157 void (*disable)(struct device *subdrv_dev);
155 int (*enable_vblank)(struct device *subdrv_dev); 158 int (*enable_vblank)(struct device *subdrv_dev);
156 void (*disable_vblank)(struct device *subdrv_dev); 159 void (*disable_vblank)(struct device *subdrv_dev);
157}; 160};
@@ -178,7 +181,7 @@ struct exynos_drm_manager {
178 int pipe; 181 int pipe;
179 struct exynos_drm_manager_ops *ops; 182 struct exynos_drm_manager_ops *ops;
180 struct exynos_drm_overlay_ops *overlay_ops; 183 struct exynos_drm_overlay_ops *overlay_ops;
181 struct exynos_drm_display *display; 184 struct exynos_drm_display_ops *display_ops;
182}; 185};
183 186
184/* 187/*
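
The new disable hook in exynos_drm_manager_ops is optional, which is why the callers added in this series always test the function pointer before dispatching. A tiny stand-alone illustration of that defensive ops-table pattern (types and names invented for the example):

    #include <stdio.h>

    struct ops {
            void (*commit)(void *dev);
            void (*disable)(void *dev);   /* optional; may stay NULL */
    };

    static void do_commit(void *dev) { printf("commit\n"); }

    int main(void)
    {
            struct ops o = { .commit = do_commit };   /* .disable left unset */

            if (o.commit)
                    o.commit(NULL);
            if (o.disable)          /* skipped safely instead of crashing */
                    o.disable(NULL);
            return 0;
    }
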
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 7cf6fa86a67e..153061415baf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -53,15 +53,36 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
53 struct drm_device *dev = encoder->dev; 53 struct drm_device *dev = encoder->dev;
54 struct drm_connector *connector; 54 struct drm_connector *connector;
55 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 55 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
56 struct exynos_drm_manager_ops *manager_ops = manager->ops;
56 57
57 DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode); 58 DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
58 59
60 switch (mode) {
61 case DRM_MODE_DPMS_ON:
62 if (manager_ops && manager_ops->commit)
63 manager_ops->commit(manager->dev);
64 break;
65 case DRM_MODE_DPMS_STANDBY:
66 case DRM_MODE_DPMS_SUSPEND:
67 case DRM_MODE_DPMS_OFF:
68 /* TODO */
69 if (manager_ops && manager_ops->disable)
70 manager_ops->disable(manager->dev);
71 break;
72 default:
73 DRM_ERROR("unspecified mode %d\n", mode);
74 break;
75 }
76
59 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 77 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
60 if (connector->encoder == encoder) { 78 if (connector->encoder == encoder) {
61 struct exynos_drm_display *display = manager->display; 79 struct exynos_drm_display_ops *display_ops =
80 manager->display_ops;
62 81
63 if (display && display->power_on) 82 DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
64 display->power_on(manager->dev, mode); 83 connector->base.id, mode);
84 if (display_ops && display_ops->power_on)
85 display_ops->power_on(manager->dev, mode);
65 } 86 }
66 } 87 }
67} 88}
@@ -116,15 +137,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
116{ 137{
117 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 138 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
118 struct exynos_drm_manager_ops *manager_ops = manager->ops; 139 struct exynos_drm_manager_ops *manager_ops = manager->ops;
119 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
120 140
121 DRM_DEBUG_KMS("%s\n", __FILE__); 141 DRM_DEBUG_KMS("%s\n", __FILE__);
122 142
123 if (manager_ops && manager_ops->commit) 143 if (manager_ops && manager_ops->commit)
124 manager_ops->commit(manager->dev); 144 manager_ops->commit(manager->dev);
125
126 if (overlay_ops && overlay_ops->commit)
127 overlay_ops->commit(manager->dev);
128} 145}
129 146
130static struct drm_crtc * 147static struct drm_crtc *
@@ -208,10 +225,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
208{ 225{
209 struct drm_device *dev = crtc->dev; 226 struct drm_device *dev = crtc->dev;
210 struct drm_encoder *encoder; 227 struct drm_encoder *encoder;
228 struct exynos_drm_private *private = dev->dev_private;
229 struct exynos_drm_manager *manager;
211 230
212 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 231 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
213 if (encoder->crtc != crtc) 232 /*
214 continue; 233 * if crtc is detached from encoder, check pipe,
234 * otherwise check crtc attached to encoder
235 */
236 if (!encoder->crtc) {
237 manager = to_exynos_encoder(encoder)->manager;
238 if (manager->pipe < 0 ||
239 private->crtc[manager->pipe] != crtc)
240 continue;
241 } else {
242 if (encoder->crtc != crtc)
243 continue;
244 }
215 245
216 fn(encoder, data); 246 fn(encoder, data);
217 } 247 }
@@ -250,8 +280,18 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
250 struct exynos_drm_manager *manager = 280 struct exynos_drm_manager *manager =
251 to_exynos_encoder(encoder)->manager; 281 to_exynos_encoder(encoder)->manager;
252 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; 282 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
283 int crtc = *(int *)data;
284
285 DRM_DEBUG_KMS("%s\n", __FILE__);
286
287 /*
288 * when crtc is detached from encoder, this pipe is used
289 * to select manager operation
290 */
291 manager->pipe = crtc;
253 292
254 overlay_ops->commit(manager->dev); 293 if (overlay_ops && overlay_ops->commit)
294 overlay_ops->commit(manager->dev);
255} 295}
256 296
257void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) 297void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -261,7 +301,28 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
261 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; 301 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
262 struct exynos_drm_overlay *overlay = data; 302 struct exynos_drm_overlay *overlay = data;
263 303
264 overlay_ops->mode_set(manager->dev, overlay); 304 if (overlay_ops && overlay_ops->mode_set)
305 overlay_ops->mode_set(manager->dev, overlay);
306}
307
308void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
309{
310 struct exynos_drm_manager *manager =
311 to_exynos_encoder(encoder)->manager;
312 struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
313
314 DRM_DEBUG_KMS("\n");
315
316 if (overlay_ops && overlay_ops->disable)
317 overlay_ops->disable(manager->dev);
318
319 /*
320 * the crtc has already been detached from the encoder and the
321 * final step of the detach is done, so clear the pipe in the
322 * manager to prevent this path from being called again
323 */
324 if (!encoder->crtc)
325 manager->pipe = -1;
265} 326}
266 327
267MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); 328MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
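
exynos_drm_fn_encoder() above now matches an encoder to a crtc in two ways: through encoder->crtc while they are attached, or through the pipe number cached in the manager after detach. A condensed, runnable sketch of just that selection predicate (simplified structs, not the driver's types):

    #include <stdio.h>

    struct crtc { int id; };
    struct encoder { struct crtc *crtc; int pipe; };  /* pipe: cached index, -1 if unset */

    /* nonzero if the encoder should receive the callback aimed at 'target' */
    static int matches(const struct encoder *e, const struct crtc *target,
                       struct crtc *crtcs[], int ncrtc)
    {
            if (!e->crtc)   /* detached: fall back to the cached pipe */
                    return e->pipe >= 0 && e->pipe < ncrtc && crtcs[e->pipe] == target;
            return e->crtc == target;
    }

    int main(void)
    {
            struct crtc c0 = { 0 }, c1 = { 1 };
            struct crtc *crtcs[] = { &c0, &c1 };
            struct encoder attached = { .crtc = &c1, .pipe = -1 };
            struct encoder detached = { .crtc = NULL, .pipe = 0 };

            printf("%d %d\n", matches(&attached, &c1, crtcs, 2),
                              matches(&detached, &c0, crtcs, 2));   /* 1 1 */
            return 0;
    }
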
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 5ecd645d06a9..a22acfbf0e4e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -41,5 +41,6 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
41void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data); 41void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
42void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data); 42void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
43void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data); 43void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
44void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
44 45
45#endif 46#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 48d29cfd5240..5bf4a1ac7f82 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -29,7 +29,9 @@
29#include "drmP.h" 29#include "drmP.h"
30#include "drm_crtc.h" 30#include "drm_crtc.h"
31#include "drm_crtc_helper.h" 31#include "drm_crtc_helper.h"
32#include "drm_fb_helper.h"
32 33
34#include "exynos_drm_drv.h"
33#include "exynos_drm_fb.h" 35#include "exynos_drm_fb.h"
34#include "exynos_drm_buf.h" 36#include "exynos_drm_buf.h"
35#include "exynos_drm_gem.h" 37#include "exynos_drm_gem.h"
@@ -41,14 +43,14 @@
41 * 43 *
42 * @fb: drm framebuffer object. 44 * @fb: drm framebuffer object.
43 * @exynos_gem_obj: exynos specific gem object containing a gem object. 45 * @exynos_gem_obj: exynos specific gem object containing a gem object.
44 * @entry: pointer to exynos drm buffer entry object. 46 * @buffer: pointer to exynos_drm_gem_buffer object.
45 * - containing only the information to physically continuous memory 47 * - contain the memory information to memory region allocated
46 * region allocated at default framebuffer creation. 48 * at default framebuffer creation.
47 */ 49 */
48struct exynos_drm_fb { 50struct exynos_drm_fb {
49 struct drm_framebuffer fb; 51 struct drm_framebuffer fb;
50 struct exynos_drm_gem_obj *exynos_gem_obj; 52 struct exynos_drm_gem_obj *exynos_gem_obj;
51 struct exynos_drm_buf_entry *entry; 53 struct exynos_drm_gem_buf *buffer;
52}; 54};
53 55
54static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) 56static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -63,8 +65,8 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
63 * default framebuffer has no gem object so 65 * default framebuffer has no gem object so
64 * a buffer of the default framebuffer should be released at here. 66 * a buffer of the default framebuffer should be released at here.
65 */ 67 */
66 if (!exynos_fb->exynos_gem_obj && exynos_fb->entry) 68 if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
67 exynos_drm_buf_destroy(fb->dev, exynos_fb->entry); 69 exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
68 70
69 kfree(exynos_fb); 71 kfree(exynos_fb);
70 exynos_fb = NULL; 72 exynos_fb = NULL;
@@ -143,29 +145,29 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
143 */ 145 */
144 if (!mode_cmd->handle) { 146 if (!mode_cmd->handle) {
145 if (!file_priv) { 147 if (!file_priv) {
146 struct exynos_drm_buf_entry *entry; 148 struct exynos_drm_gem_buf *buffer;
147 149
148 /* 150 /*
149 * in case that file_priv is NULL, it allocates 151 * in case that file_priv is NULL, it allocates
150 * only buffer and this buffer would be used 152 * only buffer and this buffer would be used
151 * for default framebuffer. 153 * for default framebuffer.
152 */ 154 */
153 entry = exynos_drm_buf_create(dev, size); 155 buffer = exynos_drm_buf_create(dev, size);
154 if (IS_ERR(entry)) { 156 if (IS_ERR(buffer)) {
155 ret = PTR_ERR(entry); 157 ret = PTR_ERR(buffer);
156 goto err_buffer; 158 goto err_buffer;
157 } 159 }
158 160
159 exynos_fb->entry = entry; 161 exynos_fb->buffer = buffer;
160 162
161 DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n", 163 DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
162 (unsigned long)entry->paddr, size); 164 (unsigned long)buffer->dma_addr, size);
163 165
164 goto out; 166 goto out;
165 } else { 167 } else {
166 exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, 168 exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
167 size, 169 &mode_cmd->handle,
168 &mode_cmd->handle); 170 size);
169 if (IS_ERR(exynos_gem_obj)) { 171 if (IS_ERR(exynos_gem_obj)) {
170 ret = PTR_ERR(exynos_gem_obj); 172 ret = PTR_ERR(exynos_gem_obj);
171 goto err_buffer; 173 goto err_buffer;
@@ -189,10 +191,10 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
189 * so that default framebuffer has no its own gem object, 191 * so that default framebuffer has no its own gem object,
190 * only its own buffer object. 192 * only its own buffer object.
191 */ 193 */
192 exynos_fb->entry = exynos_gem_obj->entry; 194 exynos_fb->buffer = exynos_gem_obj->buffer;
193 195
194 DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n", 196 DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
195 (unsigned long)exynos_fb->entry->paddr, size, 197 (unsigned long)exynos_fb->buffer->dma_addr, size,
196 (unsigned int)&exynos_gem_obj->base); 198 (unsigned int)&exynos_gem_obj->base);
197 199
198out: 200out:
@@ -220,26 +222,36 @@ struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
220 return exynos_drm_fb_init(file_priv, dev, mode_cmd); 222 return exynos_drm_fb_init(file_priv, dev, mode_cmd);
221} 223}
222 224
223struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb) 225struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
224{ 226{
225 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 227 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
226 struct exynos_drm_buf_entry *entry; 228 struct exynos_drm_gem_buf *buffer;
227 229
228 DRM_DEBUG_KMS("%s\n", __FILE__); 230 DRM_DEBUG_KMS("%s\n", __FILE__);
229 231
230 entry = exynos_fb->entry; 232 buffer = exynos_fb->buffer;
231 if (!entry) 233 if (!buffer)
232 return NULL; 234 return NULL;
233 235
234 DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", 236 DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
235 (unsigned long)entry->vaddr, 237 (unsigned long)buffer->kvaddr,
236 (unsigned long)entry->paddr); 238 (unsigned long)buffer->dma_addr);
237 239
238 return entry; 240 return buffer;
241}
242
243static void exynos_drm_output_poll_changed(struct drm_device *dev)
244{
245 struct exynos_drm_private *private = dev->dev_private;
246 struct drm_fb_helper *fb_helper = private->fb_helper;
247
248 if (fb_helper)
249 drm_fb_helper_hotplug_event(fb_helper);
239} 250}
240 251
241static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { 252static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
242 .fb_create = exynos_drm_fb_create, 253 .fb_create = exynos_drm_fb_create,
254 .output_poll_changed = exynos_drm_output_poll_changed,
243}; 255};
244 256
245void exynos_drm_mode_config_init(struct drm_device *dev) 257void exynos_drm_mode_config_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 81fba29b696d..f79f768a56ca 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -33,6 +33,7 @@
33 33
34#include "exynos_drm_drv.h" 34#include "exynos_drm_drv.h"
35#include "exynos_drm_fb.h" 35#include "exynos_drm_fb.h"
36#include "exynos_drm_gem.h"
36#include "exynos_drm_buf.h" 37#include "exynos_drm_buf.h"
37 38
38#define MAX_CONNECTOR 4 39#define MAX_CONNECTOR 4
@@ -85,15 +86,13 @@ static struct fb_ops exynos_drm_fb_ops = {
85}; 86};
86 87
87static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, 88static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
88 struct drm_framebuffer *fb, 89 struct drm_framebuffer *fb)
89 unsigned int fb_width,
90 unsigned int fb_height)
91{ 90{
92 struct fb_info *fbi = helper->fbdev; 91 struct fb_info *fbi = helper->fbdev;
93 struct drm_device *dev = helper->dev; 92 struct drm_device *dev = helper->dev;
94 struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper); 93 struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
95 struct exynos_drm_buf_entry *entry; 94 struct exynos_drm_gem_buf *buffer;
96 unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3); 95 unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
97 unsigned long offset; 96 unsigned long offset;
98 97
99 DRM_DEBUG_KMS("%s\n", __FILE__); 98 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -101,20 +100,20 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
101 exynos_fb->fb = fb; 100 exynos_fb->fb = fb;
102 101
103 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); 102 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
104 drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height); 103 drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
105 104
106 entry = exynos_drm_fb_get_buf(fb); 105 buffer = exynos_drm_fb_get_buf(fb);
107 if (!entry) { 106 if (!buffer) {
108 DRM_LOG_KMS("entry is null.\n"); 107 DRM_LOG_KMS("buffer is null.\n");
109 return -EFAULT; 108 return -EFAULT;
110 } 109 }
111 110
112 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); 111 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
113 offset += fbi->var.yoffset * fb->pitches[0]; 112 offset += fbi->var.yoffset * fb->pitches[0];
114 113
115 dev->mode_config.fb_base = entry->paddr; 114 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
116 fbi->screen_base = entry->vaddr + offset; 115 fbi->screen_base = buffer->kvaddr + offset;
117 fbi->fix.smem_start = entry->paddr + offset; 116 fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
118 fbi->screen_size = size; 117 fbi->screen_size = size;
119 fbi->fix.smem_len = size; 118 fbi->fix.smem_len = size;
120 119
@@ -171,8 +170,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
171 goto out; 170 goto out;
172 } 171 }
173 172
174 ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, 173 ret = exynos_drm_fbdev_update(helper, helper->fb);
175 sizes->fb_height);
176 if (ret < 0) 174 if (ret < 0)
177 fb_dealloc_cmap(&fbi->cmap); 175 fb_dealloc_cmap(&fbi->cmap);
178 176
@@ -235,8 +233,7 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
235 } 233 }
236 234
237 helper->fb = exynos_fbdev->fb; 235 helper->fb = exynos_fbdev->fb;
238 return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, 236 return exynos_drm_fbdev_update(helper, helper->fb);
239 sizes->fb_height);
240} 237}
241 238
242static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper, 239static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
@@ -405,6 +402,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
405 fb_helper = private->fb_helper; 402 fb_helper = private->fb_helper;
406 403
407 if (fb_helper) { 404 if (fb_helper) {
405 struct list_head temp_list;
406
407 INIT_LIST_HEAD(&temp_list);
408
409 /*
410 * fb_helper is reintialized but kernel fb is reused
411 * so kernel_fb_list need to be backuped and restored
412 */
413 if (!list_empty(&fb_helper->kernel_fb_list))
414 list_replace_init(&fb_helper->kernel_fb_list,
415 &temp_list);
416
408 drm_fb_helper_fini(fb_helper); 417 drm_fb_helper_fini(fb_helper);
409 418
410 ret = drm_fb_helper_init(dev, fb_helper, 419 ret = drm_fb_helper_init(dev, fb_helper,
@@ -414,6 +423,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
414 return ret; 423 return ret;
415 } 424 }
416 425
426 if (!list_empty(&temp_list))
427 list_replace(&temp_list, &fb_helper->kernel_fb_list);
428
417 ret = drm_fb_helper_single_add_all_connectors(fb_helper); 429 ret = drm_fb_helper_single_add_all_connectors(fb_helper);
418 if (ret < 0) { 430 if (ret < 0) {
419 DRM_ERROR("failed to add fb helper to connectors\n"); 431 DRM_ERROR("failed to add fb helper to connectors\n");
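
exynos_drm_fbdev_update() now sizes the console framebuffer from fb->width/height and offsets the mapped addresses by the pan position before filling in fbi. A self-contained sketch of that arithmetic (the resolution, depth and base address are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned int width = 1024, height = 600, bpp = 32;    /* example fb */
            unsigned int pitch = width * (bpp >> 3);              /* bytes per scanline */
            unsigned int size  = width * height * (bpp >> 3);

            unsigned int xoffset = 0, yoffset = 0;                /* pan position */
            unsigned long offset = xoffset * (bpp >> 3) + yoffset * pitch;

            unsigned long dma_addr = 0x40000000UL;                /* invented base */
            printf("size=%u smem_start=0x%lx\n", size, dma_addr + offset);
            return 0;
    }
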
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 4659c88cdd9b..db3b3d9e731d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -64,7 +64,7 @@ struct fimd_win_data {
64 unsigned int fb_width; 64 unsigned int fb_width;
65 unsigned int fb_height; 65 unsigned int fb_height;
66 unsigned int bpp; 66 unsigned int bpp;
67 dma_addr_t paddr; 67 dma_addr_t dma_addr;
68 void __iomem *vaddr; 68 void __iomem *vaddr;
69 unsigned int buf_offsize; 69 unsigned int buf_offsize;
70 unsigned int line_size; /* bytes */ 70 unsigned int line_size; /* bytes */
@@ -124,7 +124,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
124 return 0; 124 return 0;
125} 125}
126 126
127static struct exynos_drm_display fimd_display = { 127static struct exynos_drm_display_ops fimd_display_ops = {
128 .type = EXYNOS_DISPLAY_TYPE_LCD, 128 .type = EXYNOS_DISPLAY_TYPE_LCD,
129 .is_connected = fimd_display_is_connected, 129 .is_connected = fimd_display_is_connected,
130 .get_timing = fimd_get_timing, 130 .get_timing = fimd_get_timing,
@@ -177,6 +177,40 @@ static void fimd_commit(struct device *dev)
177 writel(val, ctx->regs + VIDCON0); 177 writel(val, ctx->regs + VIDCON0);
178} 178}
179 179
180static void fimd_disable(struct device *dev)
181{
182 struct fimd_context *ctx = get_fimd_context(dev);
183 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
184 struct drm_device *drm_dev = subdrv->drm_dev;
185 struct exynos_drm_manager *manager = &subdrv->manager;
186 u32 val;
187
188 DRM_DEBUG_KMS("%s\n", __FILE__);
189
190 /* fimd dma off */
191 val = readl(ctx->regs + VIDCON0);
192 val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
193 writel(val, ctx->regs + VIDCON0);
194
195 /*
196 * if vblank is still enabled when dma is turned off,
197 * disable the vsync interrupt here.
198 */
199 if (drm_dev->vblank_enabled[manager->pipe] &&
200 atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
201 drm_vblank_put(drm_dev, manager->pipe);
202
203 /*
204 * if vblank_disable_allowed is 0, disable the vsync
205 * interrupt right now; otherwise it will be disabled by
206 * the drm timer once the current process gives up
207 * ownership of the vblank event.
208 */
209 if (!drm_dev->vblank_disable_allowed)
210 drm_vblank_off(drm_dev, manager->pipe);
211 }
212}
213
180static int fimd_enable_vblank(struct device *dev) 214static int fimd_enable_vblank(struct device *dev)
181{ 215{
182 struct fimd_context *ctx = get_fimd_context(dev); 216 struct fimd_context *ctx = get_fimd_context(dev);
@@ -220,6 +254,7 @@ static void fimd_disable_vblank(struct device *dev)
220 254
221static struct exynos_drm_manager_ops fimd_manager_ops = { 255static struct exynos_drm_manager_ops fimd_manager_ops = {
222 .commit = fimd_commit, 256 .commit = fimd_commit,
257 .disable = fimd_disable,
223 .enable_vblank = fimd_enable_vblank, 258 .enable_vblank = fimd_enable_vblank,
224 .disable_vblank = fimd_disable_vblank, 259 .disable_vblank = fimd_disable_vblank,
225}; 260};
@@ -251,7 +286,7 @@ static void fimd_win_mode_set(struct device *dev,
251 win_data->ovl_height = overlay->crtc_height; 286 win_data->ovl_height = overlay->crtc_height;
252 win_data->fb_width = overlay->fb_width; 287 win_data->fb_width = overlay->fb_width;
253 win_data->fb_height = overlay->fb_height; 288 win_data->fb_height = overlay->fb_height;
254 win_data->paddr = overlay->paddr + offset; 289 win_data->dma_addr = overlay->dma_addr + offset;
255 win_data->vaddr = overlay->vaddr + offset; 290 win_data->vaddr = overlay->vaddr + offset;
256 win_data->bpp = overlay->bpp; 291 win_data->bpp = overlay->bpp;
257 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * 292 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
@@ -263,7 +298,7 @@ static void fimd_win_mode_set(struct device *dev,
263 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 298 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
264 win_data->ovl_width, win_data->ovl_height); 299 win_data->ovl_width, win_data->ovl_height);
265 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", 300 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
266 (unsigned long)win_data->paddr, 301 (unsigned long)win_data->dma_addr,
267 (unsigned long)win_data->vaddr); 302 (unsigned long)win_data->vaddr);
268 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", 303 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
269 overlay->fb_width, overlay->crtc_width); 304 overlay->fb_width, overlay->crtc_width);
@@ -376,16 +411,16 @@ static void fimd_win_commit(struct device *dev)
376 writel(val, ctx->regs + SHADOWCON); 411 writel(val, ctx->regs + SHADOWCON);
377 412
378 /* buffer start address */ 413 /* buffer start address */
379 val = win_data->paddr; 414 val = (unsigned long)win_data->dma_addr;
380 writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); 415 writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
381 416
382 /* buffer end address */ 417 /* buffer end address */
383 size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); 418 size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
384 val = win_data->paddr + size; 419 val = (unsigned long)(win_data->dma_addr + size);
385 writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); 420 writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
386 421
387 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", 422 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
388 (unsigned long)win_data->paddr, val, size); 423 (unsigned long)win_data->dma_addr, val, size);
389 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 424 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
390 win_data->ovl_width, win_data->ovl_height); 425 win_data->ovl_width, win_data->ovl_height);
391 426
@@ -447,7 +482,6 @@ static void fimd_win_commit(struct device *dev)
447static void fimd_win_disable(struct device *dev) 482static void fimd_win_disable(struct device *dev)
448{ 483{
449 struct fimd_context *ctx = get_fimd_context(dev); 484 struct fimd_context *ctx = get_fimd_context(dev);
450 struct fimd_win_data *win_data;
451 int win = ctx->default_win; 485 int win = ctx->default_win;
452 u32 val; 486 u32 val;
453 487
@@ -456,8 +490,6 @@ static void fimd_win_disable(struct device *dev)
456 if (win < 0 || win > WINDOWS_NR) 490 if (win < 0 || win > WINDOWS_NR)
457 return; 491 return;
458 492
459 win_data = &ctx->win_data[win];
460
461 /* protect windows */ 493 /* protect windows */
462 val = readl(ctx->regs + SHADOWCON); 494 val = readl(ctx->regs + SHADOWCON);
463 val |= SHADOWCON_WINx_PROTECT(win); 495 val |= SHADOWCON_WINx_PROTECT(win);
@@ -528,6 +560,16 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
528 /* VSYNC interrupt */ 560 /* VSYNC interrupt */
529 writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1); 561 writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
530 562
563 /*
564 * when vblank_disable_allowed is 1, manager->pipe can already
565 * be -1 here: the disable callback does not turn the vsync
566 * interrupt off, so a vsync interrupt can still arrive before
567 * the drm timer disables it later; just acknowledge and return
568 * in that case.
569 */
570 if (manager->pipe == -1)
571 return IRQ_HANDLED;
572
531 drm_handle_vblank(drm_dev, manager->pipe); 573 drm_handle_vblank(drm_dev, manager->pipe);
532 fimd_finish_pageflip(drm_dev, manager->pipe); 574 fimd_finish_pageflip(drm_dev, manager->pipe);
533 575
@@ -548,13 +590,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
548 */ 590 */
549 drm_dev->irq_enabled = 1; 591 drm_dev->irq_enabled = 1;
550 592
551 /*
552 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
553 * by drm timer once a current process gives up ownership of
554 * vblank event.(drm_vblank_put function was called)
555 */
556 drm_dev->vblank_disable_allowed = 1;
557
558 return 0; 593 return 0;
559} 594}
560 595
@@ -731,7 +766,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
731 subdrv->manager.pipe = -1; 766 subdrv->manager.pipe = -1;
732 subdrv->manager.ops = &fimd_manager_ops; 767 subdrv->manager.ops = &fimd_manager_ops;
733 subdrv->manager.overlay_ops = &fimd_overlay_ops; 768 subdrv->manager.overlay_ops = &fimd_overlay_ops;
734 subdrv->manager.display = &fimd_display; 769 subdrv->manager.display_ops = &fimd_display_ops;
735 subdrv->manager.dev = dev; 770 subdrv->manager.dev = dev;
736 771
737 platform_set_drvdata(pdev, ctx); 772 platform_set_drvdata(pdev, ctx);
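
fimd_disable() stops scan-out by clearing the two ENVID enable bits in VIDCON0 with a read-modify-write, the usual way of switching off individual bits in a control register without disturbing the rest. A generic stand-alone illustration (a plain variable stands in for the memory-mapped register and the bit positions are invented):

    #include <stdio.h>

    #define ENVID    (1u << 1)   /* invented bit positions for the demo */
    #define ENVID_F  (1u << 0)

    int main(void)
    {
            unsigned int vidcon0 = 0x8000003u;   /* pretend current register value */

            unsigned int val = vidcon0;          /* readl(regs + VIDCON0)          */
            val &= ~(ENVID | ENVID_F);           /* clear only the enable bits     */
            vidcon0 = val;                       /* writel(val, regs + VIDCON0)    */

            printf("0x%x\n", vidcon0);           /* 0x8000000 */
            return 0;
    }
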
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index a8e7a88906ed..aba0fe47f7ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -62,40 +62,28 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
62 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT; 62 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
63} 63}
64 64
65struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, 65static struct exynos_drm_gem_obj
66 struct drm_device *dev, unsigned int size, 66 *exynos_drm_gem_init(struct drm_device *drm_dev,
67 unsigned int *handle) 67 struct drm_file *file_priv, unsigned int *handle,
68 unsigned int size)
68{ 69{
69 struct exynos_drm_gem_obj *exynos_gem_obj; 70 struct exynos_drm_gem_obj *exynos_gem_obj;
70 struct exynos_drm_buf_entry *entry;
71 struct drm_gem_object *obj; 71 struct drm_gem_object *obj;
72 int ret; 72 int ret;
73 73
74 DRM_DEBUG_KMS("%s\n", __FILE__);
75
76 size = roundup(size, PAGE_SIZE);
77
78 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); 74 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
79 if (!exynos_gem_obj) { 75 if (!exynos_gem_obj) {
80 DRM_ERROR("failed to allocate exynos gem object.\n"); 76 DRM_ERROR("failed to allocate exynos gem object.\n");
81 return ERR_PTR(-ENOMEM); 77 return ERR_PTR(-ENOMEM);
82 } 78 }
83 79
84 /* allocate the new buffer object and memory region. */
85 entry = exynos_drm_buf_create(dev, size);
86 if (!entry) {
87 kfree(exynos_gem_obj);
88 return ERR_PTR(-ENOMEM);
89 }
90
91 exynos_gem_obj->entry = entry;
92
93 obj = &exynos_gem_obj->base; 80 obj = &exynos_gem_obj->base;
94 81
95 ret = drm_gem_object_init(dev, obj, size); 82 ret = drm_gem_object_init(drm_dev, obj, size);
96 if (ret < 0) { 83 if (ret < 0) {
97 DRM_ERROR("failed to initailize gem object.\n"); 84 DRM_ERROR("failed to initialize gem object.\n");
98 goto err_obj_init; 85 ret = -EINVAL;
86 goto err_object_init;
99 } 87 }
100 88
101 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); 89 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -127,24 +115,50 @@ err_handle_create:
127err_create_mmap_offset: 115err_create_mmap_offset:
128 drm_gem_object_release(obj); 116 drm_gem_object_release(obj);
129 117
130err_obj_init: 118err_object_init:
131 exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
132
133 kfree(exynos_gem_obj); 119 kfree(exynos_gem_obj);
134 120
135 return ERR_PTR(ret); 121 return ERR_PTR(ret);
136} 122}
137 123
124struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
125 struct drm_file *file_priv,
126 unsigned int *handle, unsigned long size)
127{
128
129 struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
130 struct exynos_drm_gem_buf *buffer;
131
132 size = roundup(size, PAGE_SIZE);
133
134 DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
135
136 buffer = exynos_drm_buf_create(dev, size);
137 if (IS_ERR(buffer)) {
138 return ERR_CAST(buffer);
139 }
140
141 exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
142 if (IS_ERR(exynos_gem_obj)) {
143 exynos_drm_buf_destroy(dev, buffer);
144 return exynos_gem_obj;
145 }
146
147 exynos_gem_obj->buffer = buffer;
148
149 return exynos_gem_obj;
150}
151
138int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, 152int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
139 struct drm_file *file_priv) 153 struct drm_file *file_priv)
140{ 154{
141 struct drm_exynos_gem_create *args = data; 155 struct drm_exynos_gem_create *args = data;
142 struct exynos_drm_gem_obj *exynos_gem_obj; 156 struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
143 157
144 DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size); 158 DRM_DEBUG_KMS("%s\n", __FILE__);
145 159
146 exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, 160 exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
147 &args->handle); 161 &args->handle, args->size);
148 if (IS_ERR(exynos_gem_obj)) 162 if (IS_ERR(exynos_gem_obj))
149 return PTR_ERR(exynos_gem_obj); 163 return PTR_ERR(exynos_gem_obj);
150 164
@@ -175,7 +189,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
175{ 189{
176 struct drm_gem_object *obj = filp->private_data; 190 struct drm_gem_object *obj = filp->private_data;
177 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 191 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
178 struct exynos_drm_buf_entry *entry; 192 struct exynos_drm_gem_buf *buffer;
179 unsigned long pfn, vm_size; 193 unsigned long pfn, vm_size;
180 194
181 DRM_DEBUG_KMS("%s\n", __FILE__); 195 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -187,20 +201,20 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
187 201
188 vm_size = vma->vm_end - vma->vm_start; 202 vm_size = vma->vm_end - vma->vm_start;
189 /* 203 /*
190 * a entry contains information to physically continuous memory 204 * a buffer contains information about physically contiguous memory
191 * allocated by user request or at framebuffer creation. 205 * allocated by user request or at framebuffer creation.
192 */ 206 */
193 entry = exynos_gem_obj->entry; 207 buffer = exynos_gem_obj->buffer;
194 208
195 /* check if user-requested size is valid. */ 209 /* check if user-requested size is valid. */
196 if (vm_size > entry->size) 210 if (vm_size > buffer->size)
197 return -EINVAL; 211 return -EINVAL;
198 212
199 /* 213 /*
200 * get page frame number to physical memory to be mapped 214 * get page frame number to physical memory to be mapped
201 * to user space. 215 * to user space.
202 */ 216 */
203 pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT; 217 pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
204 218
205 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn); 219 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
206 220
@@ -281,7 +295,7 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
281 295
282 exynos_gem_obj = to_exynos_gem_obj(gem_obj); 296 exynos_gem_obj = to_exynos_gem_obj(gem_obj);
283 297
284 exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry); 298 exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
285 299
286 kfree(exynos_gem_obj); 300 kfree(exynos_gem_obj);
287} 301}
@@ -302,8 +316,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
302 args->pitch = args->width * args->bpp >> 3; 316 args->pitch = args->width * args->bpp >> 3;
303 args->size = args->pitch * args->height; 317 args->size = args->pitch * args->height;
304 318
305 exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, 319 exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
306 &args->handle); 320 args->size);
307 if (IS_ERR(exynos_gem_obj)) 321 if (IS_ERR(exynos_gem_obj))
308 return PTR_ERR(exynos_gem_obj); 322 return PTR_ERR(exynos_gem_obj);
309 323
@@ -360,7 +374,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
360 374
361 mutex_lock(&dev->struct_mutex); 375 mutex_lock(&dev->struct_mutex);
362 376
363 pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset; 377 pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
378 PAGE_SHIFT) + page_offset;
364 379
365 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); 380 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
366 381
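
The GEM paths above round the requested allocation up to a whole number of pages and turn the buffer's DMA address into a page frame number by shifting with PAGE_SHIFT. A small arithmetic sketch, assuming 4 KiB pages and an invented buffer address:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    /* same result as the kernel's roundup() for these values */
    #define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            unsigned long size = 6000;                 /* user-requested bytes     */
            unsigned long dma_addr = 0x40001000UL;     /* invented buffer address  */
            unsigned long page_offset = 2;             /* fault offset, in pages   */

            unsigned long rounded = ROUNDUP(size, PAGE_SIZE);          /* 8192  */
            unsigned long pfn = (dma_addr >> PAGE_SHIFT) + page_offset;

            printf("rounded=%lu pfn=0x%lx\n", rounded, pfn);   /* 8192 0x40003 */
            return 0;
    }
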
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e5fc0148277b..ef8797334e6d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -30,13 +30,29 @@
30 struct exynos_drm_gem_obj, base) 30 struct exynos_drm_gem_obj, base)
31 31
32/* 32/*
33 * exynos drm gem buffer structure.
34 *
35 * @kvaddr: kernel virtual address to allocated memory region.
36 * @dma_addr: bus address(accessed by dma) to allocated memory region.
37 * - this can be a physical address when no IOMMU is in use, or
38 * a device address when an IOMMU is used.
39 * @size: size of allocated memory region.
40 */
41struct exynos_drm_gem_buf {
42 void __iomem *kvaddr;
43 dma_addr_t dma_addr;
44 unsigned long size;
45};
46
47/*
33 * exynos drm buffer structure. 48 * exynos drm buffer structure.
34 * 49 *
35 * @base: a gem object. 50 * @base: a gem object.
36 * - a new handle to this gem object would be created 51 * - a new handle to this gem object would be created
37 * by drm_gem_handle_create(). 52 * by drm_gem_handle_create().
38 * @entry: pointer to exynos drm buffer entry object. 53 * @buffer: a pointer to exynos_drm_gem_buffer object.
39 * - containing the information to physically 54 * - contains the information for the memory region allocated
55 * by user request or at framebuffer creation.
40 * continuous memory region allocated by user request 56 * continuous memory region allocated by user request
41 * or at framebuffer creation. 57 * or at framebuffer creation.
42 * 58 *
@@ -45,13 +61,13 @@
45 */ 61 */
46struct exynos_drm_gem_obj { 62struct exynos_drm_gem_obj {
47 struct drm_gem_object base; 63 struct drm_gem_object base;
48 struct exynos_drm_buf_entry *entry; 64 struct exynos_drm_gem_buf *buffer;
49}; 65};
50 66
51/* create a new buffer and get a new gem handle. */ 67/* create a new buffer and get a new gem handle. */
52struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, 68struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
53 struct drm_device *dev, unsigned int size, 69 struct drm_file *file_priv,
54 unsigned int *handle); 70 unsigned int *handle, unsigned long size);
55 71
56/* 72/*
57 * request gem object creation and buffer allocation as the size 73 * request gem object creation and buffer allocation as the size
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d14b44e13f51..004b048c5192 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
62 const struct intel_device_info *info = INTEL_INFO(dev); 62 const struct intel_device_info *info = INTEL_INFO(dev);
63 63
64 seq_printf(m, "gen: %d\n", info->gen); 64 seq_printf(m, "gen: %d\n", info->gen);
65 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
65#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 66#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
66 B(is_mobile); 67 B(is_mobile);
67 B(is_i85x); 68 B(is_i85x);
@@ -636,11 +637,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
636 struct drm_device *dev = node->minor->dev; 637 struct drm_device *dev = node->minor->dev;
637 drm_i915_private_t *dev_priv = dev->dev_private; 638 drm_i915_private_t *dev_priv = dev->dev_private;
638 struct intel_ring_buffer *ring; 639 struct intel_ring_buffer *ring;
640 int ret;
639 641
640 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 642 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
641 if (ring->size == 0) 643 if (ring->size == 0)
642 return 0; 644 return 0;
643 645
646 ret = mutex_lock_interruptible(&dev->struct_mutex);
647 if (ret)
648 return ret;
649
644 seq_printf(m, "Ring %s:\n", ring->name); 650 seq_printf(m, "Ring %s:\n", ring->name);
645 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); 651 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
646 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); 652 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
@@ -654,6 +660,8 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
654 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); 660 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
655 seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); 661 seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
656 662
663 mutex_unlock(&dev->struct_mutex);
664
657 return 0; 665 return 0;
658} 666}
659 667
@@ -842,7 +850,16 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
842 struct drm_info_node *node = (struct drm_info_node *) m->private; 850 struct drm_info_node *node = (struct drm_info_node *) m->private;
843 struct drm_device *dev = node->minor->dev; 851 struct drm_device *dev = node->minor->dev;
844 drm_i915_private_t *dev_priv = dev->dev_private; 852 drm_i915_private_t *dev_priv = dev->dev_private;
845 u16 crstanddelay = I915_READ16(CRSTANDVID); 853 u16 crstanddelay;
854 int ret;
855
856 ret = mutex_lock_interruptible(&dev->struct_mutex);
857 if (ret)
858 return ret;
859
860 crstanddelay = I915_READ16(CRSTANDVID);
861
862 mutex_unlock(&dev->struct_mutex);
846 863
847 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 864 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
848 865
@@ -940,7 +957,11 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
940 struct drm_device *dev = node->minor->dev; 957 struct drm_device *dev = node->minor->dev;
941 drm_i915_private_t *dev_priv = dev->dev_private; 958 drm_i915_private_t *dev_priv = dev->dev_private;
942 u32 delayfreq; 959 u32 delayfreq;
943 int i; 960 int ret, i;
961
962 ret = mutex_lock_interruptible(&dev->struct_mutex);
963 if (ret)
964 return ret;
944 965
945 for (i = 0; i < 16; i++) { 966 for (i = 0; i < 16; i++) {
946 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 967 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -948,6 +969,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
948 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 969 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
949 } 970 }
950 971
972 mutex_unlock(&dev->struct_mutex);
973
951 return 0; 974 return 0;
952} 975}
953 976
@@ -962,13 +985,19 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
962 struct drm_device *dev = node->minor->dev; 985 struct drm_device *dev = node->minor->dev;
963 drm_i915_private_t *dev_priv = dev->dev_private; 986 drm_i915_private_t *dev_priv = dev->dev_private;
964 u32 inttoext; 987 u32 inttoext;
965 int i; 988 int ret, i;
989
990 ret = mutex_lock_interruptible(&dev->struct_mutex);
991 if (ret)
992 return ret;
966 993
967 for (i = 1; i <= 32; i++) { 994 for (i = 1; i <= 32; i++) {
968 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 995 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
969 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 996 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
970 } 997 }
971 998
999 mutex_unlock(&dev->struct_mutex);
1000
972 return 0; 1001 return 0;
973} 1002}
974 1003
@@ -977,9 +1006,19 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
977 struct drm_info_node *node = (struct drm_info_node *) m->private; 1006 struct drm_info_node *node = (struct drm_info_node *) m->private;
978 struct drm_device *dev = node->minor->dev; 1007 struct drm_device *dev = node->minor->dev;
979 drm_i915_private_t *dev_priv = dev->dev_private; 1008 drm_i915_private_t *dev_priv = dev->dev_private;
980 u32 rgvmodectl = I915_READ(MEMMODECTL); 1009 u32 rgvmodectl, rstdbyctl;
981 u32 rstdbyctl = I915_READ(RSTDBYCTL); 1010 u16 crstandvid;
982 u16 crstandvid = I915_READ16(CRSTANDVID); 1011 int ret;
1012
1013 ret = mutex_lock_interruptible(&dev->struct_mutex);
1014 if (ret)
1015 return ret;
1016
1017 rgvmodectl = I915_READ(MEMMODECTL);
1018 rstdbyctl = I915_READ(RSTDBYCTL);
1019 crstandvid = I915_READ16(CRSTANDVID);
1020
1021 mutex_unlock(&dev->struct_mutex);
983 1022
984 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1023 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
985 "yes" : "no"); 1024 "yes" : "no");
@@ -1167,9 +1206,16 @@ static int i915_gfxec(struct seq_file *m, void *unused)
1167 struct drm_info_node *node = (struct drm_info_node *) m->private; 1206 struct drm_info_node *node = (struct drm_info_node *) m->private;
1168 struct drm_device *dev = node->minor->dev; 1207 struct drm_device *dev = node->minor->dev;
1169 drm_i915_private_t *dev_priv = dev->dev_private; 1208 drm_i915_private_t *dev_priv = dev->dev_private;
1209 int ret;
1210
1211 ret = mutex_lock_interruptible(&dev->struct_mutex);
1212 if (ret)
1213 return ret;
1170 1214
1171 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1215 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1172 1216
1217 mutex_unlock(&dev->struct_mutex);
1218
1173 return 0; 1219 return 0;
1174} 1220}
1175 1221
@@ -1506,7 +1552,10 @@ drm_add_fake_info_node(struct drm_minor *minor,
1506 node->minor = minor; 1552 node->minor = minor;
1507 node->dent = ent; 1553 node->dent = ent;
1508 node->info_ent = (void *) key; 1554 node->info_ent = (void *) key;
1509 list_add(&node->list, &minor->debugfs_nodes.list); 1555
1556 mutex_lock(&minor->debugfs_lock);
1557 list_add(&node->list, &minor->debugfs_list);
1558 mutex_unlock(&minor->debugfs_lock);
1510 1559
1511 return 0; 1560 return 0;
1512} 1561}
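
Every debugfs hunk above follows the same shape: take dev->struct_mutex interruptibly, sample the registers into locals, drop the lock, and only then format the output. A runnable user-space analogue of that shape using a pthread mutex (this is not i915 code; the shared variable simply stands in for hardware state):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int shared_reg = 0xdeadbeef;   /* stand-in for device state */

    static int show_value(void)
    {
            unsigned int sample;

            if (pthread_mutex_lock(&lock))   /* the kernel code uses the interruptible */
                    return -1;               /* variant and returns the error upward   */

            sample = shared_reg;             /* touch shared state only under the lock */

            pthread_mutex_unlock(&lock);

            printf("value: 0x%08x\n", sample);   /* format after dropping the lock */
            return 0;
    }

    int main(void)
    {
            return show_value() ? 1 : 0;
    }
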
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9533c54c93c..a9ae374861e7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1454 1454
1455 diff1 = now - dev_priv->last_time1; 1455 diff1 = now - dev_priv->last_time1;
1456 1456
1457 /* Prevent division-by-zero if we are asking too fast.
1458 * Also, we don't get interesting results if we are polling
1459 * faster than once in 10ms, so just return the saved value
1460 * in such cases.
1461 */
1462 if (diff1 <= 10)
1463 return dev_priv->chipset_power;
1464
1457 count1 = I915_READ(DMIEC); 1465 count1 = I915_READ(DMIEC);
1458 count2 = I915_READ(DDREC); 1466 count2 = I915_READ(DDREC);
1459 count3 = I915_READ(CSIEC); 1467 count3 = I915_READ(CSIEC);
@@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1484 dev_priv->last_count1 = total_count; 1492 dev_priv->last_count1 = total_count;
1485 dev_priv->last_time1 = now; 1493 dev_priv->last_time1 = now;
1486 1494
1495 dev_priv->chipset_power = ret;
1496
1487 return ret; 1497 return ret;
1488} 1498}
1489 1499
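
The i915_chipset_val() change caches the last result in dev_priv->chipset_power and returns it when the function is polled again within 10 ms, which both avoids dividing by a near-zero diff1 and skips samples too close together to be meaningful. A generic, runnable illustration of that caching pattern (the tick values and the stand-in computation are invented):

    #include <stdio.h>

    static unsigned long last_time;      /* time of the previous real sample   */
    static unsigned long cached_value;   /* result of the previous real sample */

    static unsigned long sample_power(unsigned long now)
    {
            unsigned long diff = now - last_time;

            /* polled again too quickly: reuse the cache, never divide by ~0 */
            if (diff <= 10)
                    return cached_value;

            cached_value = diff;         /* stand-in for the real computation */
            last_time = now;
            return cached_value;
    }

    int main(void)
    {
            printf("%lu\n", sample_power(100));   /* real sample     -> 100          */
            printf("%lu\n", sample_power(104));   /* within 10 ticks -> 100 (cached) */
            printf("%lu\n", sample_power(160));   /* real sample     -> 60           */
            return 0;
    }
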
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9f592703c369..22c8ab70db2c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,17 +58,17 @@ module_param_named(powersave, i915_powersave, int, 0600);
58MODULE_PARM_DESC(powersave, 58MODULE_PARM_DESC(powersave,
59 "Enable powersavings, fbc, downclocking, etc. (default: true)"); 59 "Enable powersavings, fbc, downclocking, etc. (default: true)");
60 60
61unsigned int i915_semaphores __read_mostly = 0; 61int i915_semaphores __read_mostly = -1;
62module_param_named(semaphores, i915_semaphores, int, 0600); 62module_param_named(semaphores, i915_semaphores, int, 0600);
63MODULE_PARM_DESC(semaphores, 63MODULE_PARM_DESC(semaphores,
64 "Use semaphores for inter-ring sync (default: false)"); 64 "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
65 65
66unsigned int i915_enable_rc6 __read_mostly = 0; 66int i915_enable_rc6 __read_mostly = -1;
67module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); 67module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
68MODULE_PARM_DESC(i915_enable_rc6, 68MODULE_PARM_DESC(i915_enable_rc6,
69 "Enable power-saving render C-state 6 (default: true)"); 69 "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
70 70
71unsigned int i915_enable_fbc __read_mostly = -1; 71int i915_enable_fbc __read_mostly = -1;
72module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); 72module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
73MODULE_PARM_DESC(i915_enable_fbc, 73MODULE_PARM_DESC(i915_enable_fbc,
74 "Enable frame buffer compression for power savings " 74 "Enable frame buffer compression for power savings "
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(lvds_downclock,
80 "Use panel (LVDS/eDP) downclocking for power savings " 80 "Use panel (LVDS/eDP) downclocking for power savings "
81 "(default: false)"); 81 "(default: false)");
82 82
83unsigned int i915_panel_use_ssc __read_mostly = -1; 83int i915_panel_use_ssc __read_mostly = -1;
84module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); 84module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
85MODULE_PARM_DESC(lvds_use_ssc, 85MODULE_PARM_DESC(lvds_use_ssc,
86 "Use Spread Spectrum Clock with panels [LVDS/eDP] " 86 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
@@ -107,7 +107,7 @@ static struct drm_driver driver;
107extern int intel_agp_enabled; 107extern int intel_agp_enabled;
108 108
109#define INTEL_VGA_DEVICE(id, info) { \ 109#define INTEL_VGA_DEVICE(id, info) { \
110 .class = PCI_CLASS_DISPLAY_VGA << 8, \ 110 .class = PCI_BASE_CLASS_DISPLAY << 16, \
111 .class_mask = 0xff0000, \ 111 .class_mask = 0xff0000, \
112 .vendor = 0x8086, \ 112 .vendor = 0x8086, \
113 .device = id, \ 113 .device = id, \
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
328 } 328 }
329} 329}
330 330
331static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 331void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
332{ 332{
333 int count; 333 int count;
334 334
@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
344 udelay(10); 344 udelay(10);
345} 345}
346 346
347void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
348{
349 int count;
350
351 count = 0;
352 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
353 udelay(10);
354
355 I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
356 POSTING_READ(FORCEWAKE_MT);
357
358 count = 0;
359 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
360 udelay(10);
361}
362
347/* 363/*
348 * Generally this is called implicitly by the register read function. However, 364 * Generally this is called implicitly by the register read function. However,
349 * if some sequence requires the GT to not power down then this function should 365 * if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
356 372
357 /* Forcewake is atomic in case we get in here without the lock */ 373 /* Forcewake is atomic in case we get in here without the lock */
358 if (atomic_add_return(1, &dev_priv->forcewake_count) == 1) 374 if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
359 __gen6_gt_force_wake_get(dev_priv); 375 dev_priv->display.force_wake_get(dev_priv);
360} 376}
361 377
362static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 378void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
363{ 379{
364 I915_WRITE_NOTRACE(FORCEWAKE, 0); 380 I915_WRITE_NOTRACE(FORCEWAKE, 0);
365 POSTING_READ(FORCEWAKE); 381 POSTING_READ(FORCEWAKE);
366} 382}
367 383
384void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
385{
386 I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
387 POSTING_READ(FORCEWAKE_MT);
388}
389
368/* 390/*
369 * see gen6_gt_force_wake_get() 391 * see gen6_gt_force_wake_get()
370 */ 392 */
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
373 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 395 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
374 396
375 if (atomic_dec_and_test(&dev_priv->forcewake_count)) 397 if (atomic_dec_and_test(&dev_priv->forcewake_count))
376 __gen6_gt_force_wake_put(dev_priv); 398 dev_priv->display.force_wake_put(dev_priv);
377} 399}
378 400
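gen6_gt_force_wake_get()/put() now dispatch through dev_priv->display.force_wake_get/put, but the reference counting around the call is unchanged: only the 0 -> 1 transition asserts forcewake and only the 1 -> 0 transition releases it, so nested users are cheap. A minimal single-threaded sketch of that pattern (plain counter instead of atomic_t; hw_get/hw_put are hypothetical stand-ins for the hardware handshake):

/* Illustrative only: refcount-gated acquire/release of a shared resource. */
#include <stdio.h>

static int forcewake_count;

static void hw_get(void) { printf("hardware forcewake asserted\n"); }
static void hw_put(void) { printf("hardware forcewake released\n"); }

static void force_wake_get(void)
{
	if (++forcewake_count == 1)	/* first user does the real work */
		hw_get();
}

static void force_wake_put(void)
{
	if (--forcewake_count == 0)	/* last user releases the hardware */
		hw_put();
}

int main(void)
{
	force_wake_get();
	force_wake_get();	/* nested get: no extra hardware access */
	force_wake_put();
	force_wake_put();	/* count reaches zero: release */
	return 0;
}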
379void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) 401void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -804,8 +826,8 @@ static const struct file_operations i915_driver_fops = {
804}; 826};
805 827
806static struct drm_driver driver = { 828static struct drm_driver driver = {
807 /* don't use mtrr's here, the Xserver or user space app should 829 /* Don't use MTRRs here; the Xserver or userspace app should
808 * deal with them for intel hardware. 830 * deal with them for Intel hardware.
809 */ 831 */
810 .driver_features = 832 .driver_features =
811 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 833 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
@@ -904,8 +926,9 @@ MODULE_LICENSE("GPL and additional rights");
904/* We give fast paths for the really cool registers */ 926/* We give fast paths for the really cool registers */
905#define NEEDS_FORCE_WAKE(dev_priv, reg) \ 927#define NEEDS_FORCE_WAKE(dev_priv, reg) \
906 (((dev_priv)->info->gen >= 6) && \ 928 (((dev_priv)->info->gen >= 6) && \
907 ((reg) < 0x40000) && \ 929 ((reg) < 0x40000) && \
908 ((reg) != FORCEWAKE)) 930 ((reg) != FORCEWAKE) && \
931 ((reg) != ECOBUS))
909 932
910#define __i915_read(x, y) \ 933#define __i915_read(x, y) \
911u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ 934u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 06a37f4fd74b..554bef7a3b9c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -107,6 +107,7 @@ struct opregion_header;
107struct opregion_acpi; 107struct opregion_acpi;
108struct opregion_swsci; 108struct opregion_swsci;
109struct opregion_asle; 109struct opregion_asle;
110struct drm_i915_private;
110 111
111struct intel_opregion { 112struct intel_opregion {
112 struct opregion_header *header; 113 struct opregion_header *header;
@@ -126,6 +127,9 @@ struct drm_i915_master_private {
126 struct _drm_i915_sarea *sarea_priv; 127 struct _drm_i915_sarea *sarea_priv;
127}; 128};
128#define I915_FENCE_REG_NONE -1 129#define I915_FENCE_REG_NONE -1
130#define I915_MAX_NUM_FENCES 16
131/* 16 fences + sign bit for FENCE_REG_NONE */
132#define I915_MAX_NUM_FENCE_BITS 5
129 133
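The new comment spells out why I915_MAX_NUM_FENCE_BITS is 5: fence_reg is a signed bitfield that has to hold fence indices 0..15 plus I915_FENCE_REG_NONE (-1), and a 5-bit signed field covers -16..15. A quick standalone check (bitfield signedness is explicit here, matching the signed int / s32 declarations the patch converts):

/* Illustrative only: a 5-bit signed bitfield covers -16..15. */
#include <stdio.h>

#define I915_FENCE_REG_NONE	-1
#define I915_MAX_NUM_FENCES	16
#define I915_MAX_NUM_FENCE_BITS	5

struct obj {
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
};

int main(void)
{
	struct obj o;

	o.fence_reg = I915_FENCE_REG_NONE;
	printf("none: %d\n", o.fence_reg);		/* -1 */
	o.fence_reg = I915_MAX_NUM_FENCES - 1;
	printf("last: %d\n", o.fence_reg);		/* 15 */
	return 0;
}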
130struct drm_i915_fence_reg { 134struct drm_i915_fence_reg {
131 struct list_head lru_list; 135 struct list_head lru_list;
@@ -168,7 +172,7 @@ struct drm_i915_error_state {
168 u32 instdone1; 172 u32 instdone1;
169 u32 seqno; 173 u32 seqno;
170 u64 bbaddr; 174 u64 bbaddr;
171 u64 fence[16]; 175 u64 fence[I915_MAX_NUM_FENCES];
172 struct timeval time; 176 struct timeval time;
173 struct drm_i915_error_object { 177 struct drm_i915_error_object {
174 int page_count; 178 int page_count;
@@ -182,7 +186,7 @@ struct drm_i915_error_state {
182 u32 gtt_offset; 186 u32 gtt_offset;
183 u32 read_domains; 187 u32 read_domains;
184 u32 write_domain; 188 u32 write_domain;
185 s32 fence_reg:5; 189 s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
186 s32 pinned:2; 190 s32 pinned:2;
187 u32 tiling:2; 191 u32 tiling:2;
188 u32 dirty:1; 192 u32 dirty:1;
@@ -218,6 +222,8 @@ struct drm_i915_display_funcs {
218 struct drm_i915_gem_object *obj); 222 struct drm_i915_gem_object *obj);
219 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, 223 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
220 int x, int y); 224 int x, int y);
225 void (*force_wake_get)(struct drm_i915_private *dev_priv);
226 void (*force_wake_put)(struct drm_i915_private *dev_priv);
221 /* clock updates for mode set */ 227 /* clock updates for mode set */
222 /* cursor updates */ 228 /* cursor updates */
223 /* render clock increase/decrease */ 229 /* render clock increase/decrease */
@@ -375,7 +381,7 @@ typedef struct drm_i915_private {
375 struct notifier_block lid_notifier; 381 struct notifier_block lid_notifier;
376 382
377 int crt_ddc_pin; 383 int crt_ddc_pin;
378 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ 384 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
379 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 385 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
380 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 386 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
381 387
@@ -506,7 +512,7 @@ typedef struct drm_i915_private {
506 u8 saveAR[21]; 512 u8 saveAR[21];
507 u8 saveDACMASK; 513 u8 saveDACMASK;
508 u8 saveCR[37]; 514 u8 saveCR[37];
509 uint64_t saveFENCE[16]; 515 uint64_t saveFENCE[I915_MAX_NUM_FENCES];
510 u32 saveCURACNTR; 516 u32 saveCURACNTR;
511 u32 saveCURAPOS; 517 u32 saveCURAPOS;
512 u32 saveCURABASE; 518 u32 saveCURABASE;
@@ -707,6 +713,7 @@ typedef struct drm_i915_private {
707 713
708 u64 last_count1; 714 u64 last_count1;
709 unsigned long last_time1; 715 unsigned long last_time1;
716 unsigned long chipset_power;
710 u64 last_count2; 717 u64 last_count2;
711 struct timespec last_time2; 718 struct timespec last_time2;
712 unsigned long gfx_power; 719 unsigned long gfx_power;
@@ -777,10 +784,8 @@ struct drm_i915_gem_object {
777 * Fence register bits (if any) for this object. Will be set 784 * Fence register bits (if any) for this object. Will be set
778 * as needed when mapped into the GTT. 785 * as needed when mapped into the GTT.
779 * Protected by dev->struct_mutex. 786 * Protected by dev->struct_mutex.
780 *
781 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
782 */ 787 */
783 signed int fence_reg:5; 788 signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
784 789
785 /** 790 /**
786 * Advice: are the backing pages purgeable? 791 * Advice: are the backing pages purgeable?
@@ -997,12 +1002,12 @@ extern int i915_max_ioctl;
997extern unsigned int i915_fbpercrtc __always_unused; 1002extern unsigned int i915_fbpercrtc __always_unused;
998extern int i915_panel_ignore_lid __read_mostly; 1003extern int i915_panel_ignore_lid __read_mostly;
999extern unsigned int i915_powersave __read_mostly; 1004extern unsigned int i915_powersave __read_mostly;
1000extern unsigned int i915_semaphores __read_mostly; 1005extern int i915_semaphores __read_mostly;
1001extern unsigned int i915_lvds_downclock __read_mostly; 1006extern unsigned int i915_lvds_downclock __read_mostly;
1002extern unsigned int i915_panel_use_ssc __read_mostly; 1007extern int i915_panel_use_ssc __read_mostly;
1003extern int i915_vbt_sdvo_panel_type __read_mostly; 1008extern int i915_vbt_sdvo_panel_type __read_mostly;
1004extern unsigned int i915_enable_rc6 __read_mostly; 1009extern int i915_enable_rc6 __read_mostly;
1005extern unsigned int i915_enable_fbc __read_mostly; 1010extern int i915_enable_fbc __read_mostly;
1006extern bool i915_enable_hangcheck __read_mostly; 1011extern bool i915_enable_hangcheck __read_mostly;
1007 1012
1008extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1013extern int i915_suspend(struct drm_device *dev, pm_message_t state);
@@ -1307,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
1307extern void intel_detect_pch(struct drm_device *dev); 1312extern void intel_detect_pch(struct drm_device *dev);
1308extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 1313extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
1309 1314
1315extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1316extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
1317extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1318extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
1319
1310/* overlay */ 1320/* overlay */
1311#ifdef CONFIG_DEBUG_FS 1321#ifdef CONFIG_DEBUG_FS
1312extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 1322extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1351,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1351/* We give fast paths for the really cool registers */ 1361/* We give fast paths for the really cool registers */
1352#define NEEDS_FORCE_WAKE(dev_priv, reg) \ 1362#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1353 (((dev_priv)->info->gen >= 6) && \ 1363 (((dev_priv)->info->gen >= 6) && \
1354 ((reg) < 0x40000) && \ 1364 ((reg) < 0x40000) && \
1355 ((reg) != FORCEWAKE)) 1365 ((reg) != FORCEWAKE) && \
1366 ((reg) != ECOBUS))
1356 1367
1357#define __i915_read(x, y) \ 1368#define __i915_read(x, y) \
1358 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); 1369 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6651c36b6e8a..8359dc777041 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1396,7 +1396,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
1396 1396
1397 if (obj->base.size > dev_priv->mm.gtt_mappable_end) { 1397 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1398 ret = -E2BIG; 1398 ret = -E2BIG;
1399 goto unlock; 1399 goto out;
1400 } 1400 }
1401 1401
1402 if (obj->madv != I915_MADV_WILLNEED) { 1402 if (obj->madv != I915_MADV_WILLNEED) {
@@ -1745,7 +1745,7 @@ static void i915_gem_reset_fences(struct drm_device *dev)
1745 struct drm_i915_private *dev_priv = dev->dev_private; 1745 struct drm_i915_private *dev_priv = dev->dev_private;
1746 int i; 1746 int i;
1747 1747
1748 for (i = 0; i < 16; i++) { 1748 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1749 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 1749 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1750 struct drm_i915_gem_object *obj = reg->obj; 1750 struct drm_i915_gem_object *obj = reg->obj;
1751 1751
@@ -3512,9 +3512,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3512 * so emit a request to do so. 3512 * so emit a request to do so.
3513 */ 3513 */
3514 request = kzalloc(sizeof(*request), GFP_KERNEL); 3514 request = kzalloc(sizeof(*request), GFP_KERNEL);
3515 if (request) 3515 if (request) {
3516 ret = i915_add_request(obj->ring, NULL, request); 3516 ret = i915_add_request(obj->ring, NULL, request);
3517 else 3517 if (ret)
3518 kfree(request);
3519 } else
3518 ret = -ENOMEM; 3520 ret = -ENOMEM;
3519 } 3521 }
3520 3522
@@ -3613,7 +3615,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3613 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3615 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3614 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3616 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3615 3617
3616 if (IS_GEN6(dev)) { 3618 if (IS_GEN6(dev) || IS_GEN7(dev)) {
3617 /* On Gen6, we can have the GPU use the LLC (the CPU 3619 /* On Gen6, we can have the GPU use the LLC (the CPU
3618 * cache) for about a 10% performance improvement 3620 * cache) for about a 10% performance improvement
3619 * compared to uncached. Graphics requests other than 3621 * compared to uncached. Graphics requests other than
@@ -3877,7 +3879,7 @@ i915_gem_load(struct drm_device *dev)
3877 INIT_LIST_HEAD(&dev_priv->mm.gtt_list); 3879 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3878 for (i = 0; i < I915_NUM_RINGS; i++) 3880 for (i = 0; i < I915_NUM_RINGS; i++)
3879 init_ring_lists(&dev_priv->ring[i]); 3881 init_ring_lists(&dev_priv->ring[i]);
3880 for (i = 0; i < 16; i++) 3882 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3881 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 3883 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3882 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 3884 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3883 i915_gem_retire_work_handler); 3885 i915_gem_retire_work_handler);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3693e83a97f3..c681dc149d2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
32#include "i915_drv.h" 32#include "i915_drv.h"
33#include "i915_trace.h" 33#include "i915_trace.h"
34#include "intel_drv.h" 34#include "intel_drv.h"
35#include <linux/dma_remapping.h>
35 36
36struct change_domains { 37struct change_domains {
37 uint32_t invalidate_domains; 38 uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
746 return 0; 747 return 0;
747} 748}
748 749
750static bool
751intel_enable_semaphores(struct drm_device *dev)
752{
753 if (INTEL_INFO(dev)->gen < 6)
754 return 0;
755
756 if (i915_semaphores >= 0)
757 return i915_semaphores;
758
759 /* Enable semaphores on SNB when IO remapping is off */
760 if (INTEL_INFO(dev)->gen == 6)
761 return !intel_iommu_enabled;
762
763 return 1;
764}
765
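intel_enable_semaphores() folds the module parameter and the IOMMU state into one decision: pre-gen6 never uses semaphores, an explicit i915_semaphores value always wins, gen 6 (SNB) only enables them when DMA remapping is off, and everything newer defaults to on. A small harness exercising the same decision for a few combinations (assuming intel_iommu_enabled reflects whether DMA remapping is active):

/*
 * Illustrative only: mirror the decision above for a handful of cases.
 */
#include <stdio.h>

static int enable_semaphores(int gen, int param, int iommu)
{
	if (gen < 6)
		return 0;
	if (param >= 0)		/* explicit module parameter wins */
		return param;
	if (gen == 6)		/* SNB: only when DMA remapping is off */
		return !iommu;
	return 1;		/* gen 7+: default on */
}

int main(void)
{
	printf("gen6, auto, iommu off -> %d\n", enable_semaphores(6, -1, 0));
	printf("gen6, auto, iommu on  -> %d\n", enable_semaphores(6, -1, 1));
	printf("gen6, forced on       -> %d\n", enable_semaphores(6, 1, 1));
	printf("gen7, auto            -> %d\n", enable_semaphores(7, -1, 0));
	return 0;
}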
749static int 766static int
750i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, 767i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
751 struct intel_ring_buffer *to) 768 struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
758 return 0; 775 return 0;
759 776
760 /* XXX gpu semaphores are implicated in various hard hangs on SNB */ 777 /* XXX gpu semaphores are implicated in various hard hangs on SNB */
761 if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores) 778 if (!intel_enable_semaphores(obj->base.dev))
762 return i915_gem_object_wait_rendering(obj); 779 return i915_gem_object_wait_rendering(obj);
763 780
764 idx = intel_ring_sync_index(from, to); 781 idx = intel_ring_sync_index(from, to);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 96643ee240da..3700df47ad93 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -824,6 +824,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
824 824
825 /* Fences */ 825 /* Fences */
826 switch (INTEL_INFO(dev)->gen) { 826 switch (INTEL_INFO(dev)->gen) {
827 case 7:
827 case 6: 828 case 6:
828 for (i = 0; i < 16; i++) 829 for (i = 0; i < 16; i++)
829 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 830 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 517bf0cda3e5..853f2f0acaa2 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1553,12 +1553,21 @@
1553 */ 1553 */
1554#define PP_READY (1 << 30) 1554#define PP_READY (1 << 30)
1555#define PP_SEQUENCE_NONE (0 << 28) 1555#define PP_SEQUENCE_NONE (0 << 28)
1556#define PP_SEQUENCE_ON (1 << 28) 1556#define PP_SEQUENCE_POWER_UP (1 << 28)
1557#define PP_SEQUENCE_OFF (2 << 28) 1557#define PP_SEQUENCE_POWER_DOWN (2 << 28)
1558#define PP_SEQUENCE_MASK 0x30000000 1558#define PP_SEQUENCE_MASK (3 << 28)
1559#define PP_SEQUENCE_SHIFT 28
1559#define PP_CYCLE_DELAY_ACTIVE (1 << 27) 1560#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
1560#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
1561#define PP_SEQUENCE_STATE_MASK 0x0000000f 1561#define PP_SEQUENCE_STATE_MASK 0x0000000f
1562#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
1563#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
1564#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
1565#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
1566#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
1567#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
1568#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
1569#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
1570#define PP_SEQUENCE_STATE_RESET (0xf << 0)
1562#define PP_CONTROL 0x61204 1571#define PP_CONTROL 0x61204
1563#define POWER_TARGET_ON (1 << 0) 1572#define POWER_TARGET_ON (1 << 0)
1564#define PP_ON_DELAYS 0x61208 1573#define PP_ON_DELAYS 0x61208
@@ -3295,10 +3304,10 @@
3295/* or SDVOB */ 3304/* or SDVOB */
3296#define HDMIB 0xe1140 3305#define HDMIB 0xe1140
3297#define PORT_ENABLE (1 << 31) 3306#define PORT_ENABLE (1 << 31)
3298#define TRANSCODER_A (0) 3307#define TRANSCODER(pipe) ((pipe) << 30)
3299#define TRANSCODER_B (1 << 30) 3308#define TRANSCODER_CPT(pipe) ((pipe) << 29)
3300#define TRANSCODER(pipe) ((pipe) << 30) 3309#define TRANSCODER_MASK (1 << 30)
3301#define TRANSCODER_MASK (1 << 30) 3310#define TRANSCODER_MASK_CPT (3 << 29)
3302#define COLOR_FORMAT_8bpc (0) 3311#define COLOR_FORMAT_8bpc (0)
3303#define COLOR_FORMAT_12bpc (3 << 26) 3312#define COLOR_FORMAT_12bpc (3 << 26)
3304#define SDVOB_HOTPLUG_ENABLE (1 << 23) 3313#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3439,12 +3448,38 @@
3439#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22) 3448#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
3440#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) 3449#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
3441 3450
3451/* IVB */
3452#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
3453#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
3454#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
3455#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
3456#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
3457#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
3458#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
3459
3460/* legacy values */
3461#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
3462#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
3463#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
3464#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
3465#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
3466
3467#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
3468
3442#define FORCEWAKE 0xA18C 3469#define FORCEWAKE 0xA18C
3443#define FORCEWAKE_ACK 0x130090 3470#define FORCEWAKE_ACK 0x130090
3471#define FORCEWAKE_MT 0xa188 /* multi-threaded */
3472#define FORCEWAKE_MT_ACK 0x130040
3473#define ECOBUS 0xa180
3474#define FORCEWAKE_MT_ENABLE (1<<5)
3444 3475
3445#define GT_FIFO_FREE_ENTRIES 0x120008 3476#define GT_FIFO_FREE_ENTRIES 0x120008
3446#define GT_FIFO_NUM_RESERVED_ENTRIES 20 3477#define GT_FIFO_NUM_RESERVED_ENTRIES 20
3447 3478
3479#define GEN6_UCGCTL2 0x9404
3480# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
3481# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
3482
3448#define GEN6_RPNSWREQ 0xA008 3483#define GEN6_RPNSWREQ 0xA008
3449#define GEN6_TURBO_DISABLE (1<<31) 3484#define GEN6_TURBO_DISABLE (1<<31)
3450#define GEN6_FREQUENCY(x) ((x)<<25) 3485#define GEN6_FREQUENCY(x) ((x)<<25)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index f8f602d76650..7886e4fb60e3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
370 370
371 /* Fences */ 371 /* Fences */
372 switch (INTEL_INFO(dev)->gen) { 372 switch (INTEL_INFO(dev)->gen) {
373 case 7:
373 case 6: 374 case 6:
374 for (i = 0; i < 16; i++) 375 for (i = 0; i < 16; i++)
375 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 376 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
@@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
404 405
405 /* Fences */ 406 /* Fences */
406 switch (INTEL_INFO(dev)->gen) { 407 switch (INTEL_INFO(dev)->gen) {
408 case 7:
407 case 6: 409 case 6:
408 for (i = 0; i < 16; i++) 410 for (i = 0; i < 16; i++)
409 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); 411 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8ecbc2f11633..5a3e7853003f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -38,8 +38,8 @@
38#include "i915_drv.h" 38#include "i915_drv.h"
39#include "i915_trace.h" 39#include "i915_trace.h"
40#include "drm_dp_helper.h" 40#include "drm_dp_helper.h"
41
42#include "drm_crtc_helper.h" 41#include "drm_crtc_helper.h"
42#include <linux/dma_remapping.h>
43 43
44#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) 44#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
45 45
@@ -2933,7 +2933,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2933 2933
2934 /* For PCH DP, enable TRANS_DP_CTL */ 2934 /* For PCH DP, enable TRANS_DP_CTL */
2935 if (HAS_PCH_CPT(dev) && 2935 if (HAS_PCH_CPT(dev) &&
2936 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 2936 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
2937 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2937 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; 2938 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2938 reg = TRANS_DP_CTL(pipe); 2939 reg = TRANS_DP_CTL(pipe);
2939 temp = I915_READ(reg); 2940 temp = I915_READ(reg);
@@ -4669,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4669/** 4670/**
4670 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send 4671 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4671 * @crtc: CRTC structure 4672 * @crtc: CRTC structure
4673 * @mode: requested mode
4672 * 4674 *
4673 * A pipe may be connected to one or more outputs. Based on the depth of the 4675 * A pipe may be connected to one or more outputs. Based on the depth of the
4674 * attached framebuffer, choose a good color depth to use on the pipe. 4676 * attached framebuffer, choose a good color depth to use on the pipe.
@@ -4680,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4680 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc 4682 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4681 * Displays may support a restricted set as well, check EDID and clamp as 4683 * Displays may support a restricted set as well, check EDID and clamp as
4682 * appropriate. 4684 * appropriate.
4685 * DP may want to dither down to 6bpc to fit larger modes
4683 * 4686 *
4684 * RETURNS: 4687 * RETURNS:
4685 * Dithering requirement (i.e. false if display bpc and pipe bpc match, 4688 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4686 * true if they don't match). 4689 * true if they don't match).
4687 */ 4690 */
4688static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, 4691static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4689 unsigned int *pipe_bpp) 4692 unsigned int *pipe_bpp,
4693 struct drm_display_mode *mode)
4690{ 4694{
4691 struct drm_device *dev = crtc->dev; 4695 struct drm_device *dev = crtc->dev;
4692 struct drm_i915_private *dev_priv = dev->dev_private; 4696 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4711,7 +4715,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4711 lvds_bpc = 6; 4715 lvds_bpc = 6;
4712 4716
4713 if (lvds_bpc < display_bpc) { 4717 if (lvds_bpc < display_bpc) {
4714 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); 4718 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4715 display_bpc = lvds_bpc; 4719 display_bpc = lvds_bpc;
4716 } 4720 }
4717 continue; 4721 continue;
@@ -4722,7 +4726,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4722 unsigned int edp_bpc = dev_priv->edp.bpp / 3; 4726 unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4723 4727
4724 if (edp_bpc < display_bpc) { 4728 if (edp_bpc < display_bpc) {
4725 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); 4729 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4726 display_bpc = edp_bpc; 4730 display_bpc = edp_bpc;
4727 } 4731 }
4728 continue; 4732 continue;
@@ -4737,7 +4741,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4737 /* Don't use an invalid EDID bpc value */ 4741 /* Don't use an invalid EDID bpc value */
4738 if (connector->display_info.bpc && 4742 if (connector->display_info.bpc &&
4739 connector->display_info.bpc < display_bpc) { 4743 connector->display_info.bpc < display_bpc) {
4740 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); 4744 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4741 display_bpc = connector->display_info.bpc; 4745 display_bpc = connector->display_info.bpc;
4742 } 4746 }
4743 } 4747 }
@@ -4748,15 +4752,20 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4748 */ 4752 */
4749 if (intel_encoder->type == INTEL_OUTPUT_HDMI) { 4753 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4750 if (display_bpc > 8 && display_bpc < 12) { 4754 if (display_bpc > 8 && display_bpc < 12) {
4751 DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n"); 4755 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
4752 display_bpc = 12; 4756 display_bpc = 12;
4753 } else { 4757 } else {
4754 DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n"); 4758 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
4755 display_bpc = 8; 4759 display_bpc = 8;
4756 } 4760 }
4757 } 4761 }
4758 } 4762 }
4759 4763
4764 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4765 DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
4766 display_bpc = 6;
4767 }
4768
4760 /* 4769 /*
4761 * We could just drive the pipe at the highest bpc all the time and 4770 * We could just drive the pipe at the highest bpc all the time and
4762 * enable dithering as needed, but that costs bandwidth. So choose 4771 * enable dithering as needed, but that costs bandwidth. So choose
@@ -4789,8 +4798,8 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4789 4798
4790 display_bpc = min(display_bpc, bpc); 4799 display_bpc = min(display_bpc, bpc);
4791 4800
4792 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", 4801 DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
4793 bpc, display_bpc); 4802 bpc, display_bpc);
4794 4803
4795 *pipe_bpp = display_bpc * 3; 4804 *pipe_bpp = display_bpc * 3;
4796 4805
@@ -5018,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5018 pipeconf &= ~PIPECONF_DOUBLE_WIDE; 5027 pipeconf &= ~PIPECONF_DOUBLE_WIDE;
5019 } 5028 }
5020 5029
5030 /* default to 8bpc */
5031 pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
5032 if (is_dp) {
5033 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5034 pipeconf |= PIPECONF_BPP_6 |
5035 PIPECONF_DITHER_EN |
5036 PIPECONF_DITHER_TYPE_SP;
5037 }
5038 }
5039
5021 dpll |= DPLL_VCO_ENABLE; 5040 dpll |= DPLL_VCO_ENABLE;
5022 5041
5023 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 5042 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5479,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5479 /* determine panel color depth */ 5498 /* determine panel color depth */
5480 temp = I915_READ(PIPECONF(pipe)); 5499 temp = I915_READ(PIPECONF(pipe));
5481 temp &= ~PIPE_BPC_MASK; 5500 temp &= ~PIPE_BPC_MASK;
5482 dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp); 5501 dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
5483 switch (pipe_bpp) { 5502 switch (pipe_bpp) {
5484 case 18: 5503 case 18:
5485 temp |= PIPE_6BPC; 5504 temp |= PIPE_6BPC;
@@ -5671,7 +5690,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5671 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; 5690 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5672 if ((is_lvds && dev_priv->lvds_dither) || dither) { 5691 if ((is_lvds && dev_priv->lvds_dither) || dither) {
5673 pipeconf |= PIPECONF_DITHER_EN; 5692 pipeconf |= PIPECONF_DITHER_EN;
5674 pipeconf |= PIPECONF_DITHER_TYPE_ST1; 5693 pipeconf |= PIPECONF_DITHER_TYPE_SP;
5675 } 5694 }
5676 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5695 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5677 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5696 intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -7188,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7188 work->old_fb_obj = intel_fb->obj; 7207 work->old_fb_obj = intel_fb->obj;
7189 INIT_WORK(&work->work, intel_unpin_work_fn); 7208 INIT_WORK(&work->work, intel_unpin_work_fn);
7190 7209
7210 ret = drm_vblank_get(dev, intel_crtc->pipe);
7211 if (ret)
7212 goto free_work;
7213
7191 /* We borrow the event spin lock for protecting unpin_work */ 7214 /* We borrow the event spin lock for protecting unpin_work */
7192 spin_lock_irqsave(&dev->event_lock, flags); 7215 spin_lock_irqsave(&dev->event_lock, flags);
7193 if (intel_crtc->unpin_work) { 7216 if (intel_crtc->unpin_work) {
7194 spin_unlock_irqrestore(&dev->event_lock, flags); 7217 spin_unlock_irqrestore(&dev->event_lock, flags);
7195 kfree(work); 7218 kfree(work);
7219 drm_vblank_put(dev, intel_crtc->pipe);
7196 7220
7197 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 7221 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
7198 return -EBUSY; 7222 return -EBUSY;
@@ -7211,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7211 7235
7212 crtc->fb = fb; 7236 crtc->fb = fb;
7213 7237
7214 ret = drm_vblank_get(dev, intel_crtc->pipe);
7215 if (ret)
7216 goto cleanup_objs;
7217
7218 work->pending_flip_obj = obj; 7238 work->pending_flip_obj = obj;
7219 7239
7220 work->enable_stall_check = true; 7240 work->enable_stall_check = true;
@@ -7237,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7237 7257
7238cleanup_pending: 7258cleanup_pending:
7239 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 7259 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
7240cleanup_objs:
7241 drm_gem_object_unreference(&work->old_fb_obj->base); 7260 drm_gem_object_unreference(&work->old_fb_obj->base);
7242 drm_gem_object_unreference(&obj->base); 7261 drm_gem_object_unreference(&obj->base);
7243 mutex_unlock(&dev->struct_mutex); 7262 mutex_unlock(&dev->struct_mutex);
@@ -7246,6 +7265,8 @@ cleanup_objs:
7246 intel_crtc->unpin_work = NULL; 7265 intel_crtc->unpin_work = NULL;
7247 spin_unlock_irqrestore(&dev->event_lock, flags); 7266 spin_unlock_irqrestore(&dev->event_lock, flags);
7248 7267
7268 drm_vblank_put(dev, intel_crtc->pipe);
7269free_work:
7249 kfree(work); 7270 kfree(work);
7250 7271
7251 return ret; 7272 return ret;
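Taking the vblank reference before the event lock lets every later failure unwind it through a single exit path: the new free_work label releases only what has actually been acquired, and the error paths undo resources in reverse order of acquisition. A generic sketch of that acquire-early/unwind-in-reverse idiom (stub resources and error codes, not the DRM calls themselves):

/* Illustrative only: goto-based unwind in acquisition order. */
#include <stdio.h>
#include <stdlib.h>

static int grab_vblank(void) { return 0; }	/* pretend these can fail */
static void drop_vblank(void) { printf("vblank reference dropped\n"); }
static int queue_flip(void) { return -1; }	/* simulate a failure */

static int page_flip(void)
{
	void *work;
	int ret;

	work = malloc(32);
	if (!work)
		return -12;			/* -ENOMEM */

	ret = grab_vblank();
	if (ret)
		goto free_work;

	ret = queue_flip();
	if (ret)
		goto put_vblank;		/* undo in reverse order */

	return 0;	/* on success the work item is kept for the flip handler */

put_vblank:
	drop_vblank();
free_work:
	free(work);
	return ret;
}

int main(void)
{
	printf("page_flip() = %d\n", page_flip());
	return 0;
}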
@@ -7891,6 +7912,33 @@ void intel_init_emon(struct drm_device *dev)
7891 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); 7912 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
7892} 7913}
7893 7914
7915static bool intel_enable_rc6(struct drm_device *dev)
7916{
7917 /*
7918 * Respect the kernel parameter if it is set
7919 */
7920 if (i915_enable_rc6 >= 0)
7921 return i915_enable_rc6;
7922
7923 /*
7924 * Disable RC6 on Ironlake
7925 */
7926 if (INTEL_INFO(dev)->gen == 5)
7927 return 0;
7928
7929 /*
7930 * Enable rc6 on Sandybridge if DMA remapping is disabled
7931 */
7932 if (INTEL_INFO(dev)->gen == 6) {
7933 DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
7934 intel_iommu_enabled ? "true" : "false",
7935 !intel_iommu_enabled ? "en" : "dis");
7936 return !intel_iommu_enabled;
7937 }
7938 DRM_DEBUG_DRIVER("RC6 enabled\n");
7939 return 1;
7940}
7941
7894void gen6_enable_rps(struct drm_i915_private *dev_priv) 7942void gen6_enable_rps(struct drm_i915_private *dev_priv)
7895{ 7943{
7896 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 7944 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7927,7 +7975,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
7927 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); 7975 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
7928 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 7976 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
7929 7977
7930 if (i915_enable_rc6) 7978 if (intel_enable_rc6(dev_priv->dev))
7931 rc6_mask = GEN6_RC_CTL_RC6p_ENABLE | 7979 rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
7932 GEN6_RC_CTL_RC6_ENABLE; 7980 GEN6_RC_CTL_RC6_ENABLE;
7933 7981
@@ -8153,6 +8201,20 @@ static void gen6_init_clock_gating(struct drm_device *dev)
8153 I915_WRITE(WM2_LP_ILK, 0); 8201 I915_WRITE(WM2_LP_ILK, 0);
8154 I915_WRITE(WM1_LP_ILK, 0); 8202 I915_WRITE(WM1_LP_ILK, 0);
8155 8203
8204 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
8205 * gating disable must be set. Failure to set it results in
8206 * flickering pixels due to Z write ordering failures after
8207 * some amount of runtime in the Mesa "fire" demo, and Unigine
8208 * Sanctuary and Tropics, and apparently anything else with
8209 * alpha test or pixel discard.
8210 *
8211 * According to the spec, bit 11 (RCCUNIT) must also be set,
8212 * but we didn't debug actual testcases to find it out.
8213 */
8214 I915_WRITE(GEN6_UCGCTL2,
8215 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
8216 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
8217
8156 /* 8218 /*
8157 * According to the spec the following bits should be 8219 * According to the spec the following bits should be
8158 * set in order to enable memory self-refresh and fbc: 8220 * set in order to enable memory self-refresh and fbc:
@@ -8362,7 +8424,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
8362 /* rc6 disabled by default due to repeated reports of hanging during 8424 /* rc6 disabled by default due to repeated reports of hanging during
8363 * boot and resume. 8425 * boot and resume.
8364 */ 8426 */
8365 if (!i915_enable_rc6) 8427 if (!intel_enable_rc6(dev))
8366 return; 8428 return;
8367 8429
8368 mutex_lock(&dev->struct_mutex); 8430 mutex_lock(&dev->struct_mutex);
@@ -8481,6 +8543,28 @@ static void intel_init_display(struct drm_device *dev)
8481 8543
8482 /* For FIFO watermark updates */ 8544 /* For FIFO watermark updates */
8483 if (HAS_PCH_SPLIT(dev)) { 8545 if (HAS_PCH_SPLIT(dev)) {
8546 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
8547 dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
8548
8549 /* IVB configs may use multi-threaded forcewake */
8550 if (IS_IVYBRIDGE(dev)) {
8551 u32 ecobus;
8552
8553 mutex_lock(&dev->struct_mutex);
8554 __gen6_gt_force_wake_mt_get(dev_priv);
8555 ecobus = I915_READ(ECOBUS);
8556 __gen6_gt_force_wake_mt_put(dev_priv);
8557 mutex_unlock(&dev->struct_mutex);
8558
8559 if (ecobus & FORCEWAKE_MT_ENABLE) {
8560 DRM_DEBUG_KMS("Using MT version of forcewake\n");
8561 dev_priv->display.force_wake_get =
8562 __gen6_gt_force_wake_mt_get;
8563 dev_priv->display.force_wake_put =
8564 __gen6_gt_force_wake_mt_put;
8565 }
8566 }
8567
8484 if (HAS_PCH_IBX(dev)) 8568 if (HAS_PCH_IBX(dev))
8485 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; 8569 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
8486 else if (HAS_PCH_CPT(dev)) 8570 else if (HAS_PCH_CPT(dev))
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09b318b0227f..92b041b66e49 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -59,7 +59,6 @@ struct intel_dp {
59 struct i2c_algo_dp_aux_data algo; 59 struct i2c_algo_dp_aux_data algo;
60 bool is_pch_edp; 60 bool is_pch_edp;
61 uint8_t train_set[4]; 61 uint8_t train_set[4];
62 uint8_t link_status[DP_LINK_STATUS_SIZE];
63 int panel_power_up_delay; 62 int panel_power_up_delay;
64 int panel_power_down_delay; 63 int panel_power_down_delay;
65 int panel_power_cycle_delay; 64 int panel_power_cycle_delay;
@@ -68,7 +67,6 @@ struct intel_dp {
68 struct drm_display_mode *panel_fixed_mode; /* for eDP */ 67 struct drm_display_mode *panel_fixed_mode; /* for eDP */
69 struct delayed_work panel_vdd_work; 68 struct delayed_work panel_vdd_work;
70 bool want_panel_vdd; 69 bool want_panel_vdd;
71 unsigned long panel_off_jiffies;
72}; 70};
73 71
74/** 72/**
@@ -157,16 +155,12 @@ intel_edp_link_config(struct intel_encoder *intel_encoder,
157static int 155static int
158intel_dp_max_lane_count(struct intel_dp *intel_dp) 156intel_dp_max_lane_count(struct intel_dp *intel_dp)
159{ 157{
160 int max_lane_count = 4; 158 int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
161 159 switch (max_lane_count) {
162 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 160 case 1: case 2: case 4:
163 max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; 161 break;
164 switch (max_lane_count) { 162 default:
165 case 1: case 2: case 4: 163 max_lane_count = 4;
166 break;
167 default:
168 max_lane_count = 4;
169 }
170 } 164 }
171 return max_lane_count; 165 return max_lane_count;
172} 166}
@@ -214,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
214 */ 208 */
215 209
216static int 210static int
217intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock) 211intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
218{ 212{
219 struct drm_crtc *crtc = intel_dp->base.base.crtc; 213 struct drm_crtc *crtc = intel_dp->base.base.crtc;
220 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 214 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
221 int bpp = 24; 215 int bpp = 24;
222 216
223 if (intel_crtc) 217 if (check_bpp)
218 bpp = check_bpp;
219 else if (intel_crtc)
224 bpp = intel_crtc->bpp; 220 bpp = intel_crtc->bpp;
225 221
226 return (pixel_clock * bpp + 9) / 10; 222 return (pixel_clock * bpp + 9) / 10;
@@ -239,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
239 struct intel_dp *intel_dp = intel_attached_dp(connector); 235 struct intel_dp *intel_dp = intel_attached_dp(connector);
240 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); 236 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
241 int max_lanes = intel_dp_max_lane_count(intel_dp); 237 int max_lanes = intel_dp_max_lane_count(intel_dp);
238 int max_rate, mode_rate;
242 239
243 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { 240 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
244 if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) 241 if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -248,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
248 return MODE_PANEL; 245 return MODE_PANEL;
249 } 246 }
250 247
251 if (intel_dp_link_required(intel_dp, mode->clock) 248 mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
252 > intel_dp_max_data_rate(max_link_clock, max_lanes)) 249 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
253 return MODE_CLOCK_HIGH; 250
251 if (mode_rate > max_rate) {
252 mode_rate = intel_dp_link_required(intel_dp,
253 mode->clock, 18);
254 if (mode_rate > max_rate)
255 return MODE_CLOCK_HIGH;
256 else
257 mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
258 }
254 259
255 if (mode->clock < 10000) 260 if (mode->clock < 10000)
256 return MODE_CLOCK_LOW; 261 return MODE_CLOCK_LOW;
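intel_dp_mode_valid() now retries the bandwidth check at 18 bpp (6 bpc per channel) before rejecting a mode, and tags the mode with INTEL_MODE_DP_FORCE_6BPC when only the dithered depth fits. A worked example of the arithmetic, assuming intel_dp_max_data_rate() keeps its usual link_clock * lanes * 8 / 10 form (its body is not shown in this hunk): a 268500 kHz mode over four 1.62 Gbps lanes is too wide at 24 bpp but fits at 18 bpp.

/*
 * Illustrative only: formulas as used by intel_dp_link_required() above,
 * max rate assumed to be link_clock * lanes * 8 / 10.
 */
#include <stdio.h>

static int link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int max_data_rate(int link_clock, int lanes)
{
	return (link_clock * lanes * 8) / 10;
}

int main(void)
{
	int max = max_data_rate(162000, 4);	/* 518400 */

	printf("max link rate  : %d\n", max);
	printf("mode at 24 bpp : %d\n", link_required(268500, 24)); /* 644400: too much */
	printf("mode at 18 bpp : %d\n", link_required(268500, 18)); /* 483300: fits    */
	return 0;
}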
@@ -368,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
368 * clock divider. 373 * clock divider.
369 */ 374 */
370 if (is_cpu_edp(intel_dp)) { 375 if (is_cpu_edp(intel_dp)) {
371 if (IS_GEN6(dev)) 376 if (IS_GEN6(dev) || IS_GEN7(dev))
372 aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ 377 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
373 else 378 else
374 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 379 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
375 } else if (HAS_PCH_SPLIT(dev)) 380 } else if (HAS_PCH_SPLIT(dev))
@@ -678,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
678 int lane_count, clock; 683 int lane_count, clock;
679 int max_lane_count = intel_dp_max_lane_count(intel_dp); 684 int max_lane_count = intel_dp_max_lane_count(intel_dp);
680 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 685 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
686 int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
681 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 687 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
682 688
683 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { 689 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -695,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
695 for (clock = 0; clock <= max_clock; clock++) { 701 for (clock = 0; clock <= max_clock; clock++) {
696 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 702 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
697 703
698 if (intel_dp_link_required(intel_dp, mode->clock) 704 if (intel_dp_link_required(intel_dp, mode->clock, bpp)
699 <= link_avail) { 705 <= link_avail) {
700 intel_dp->link_bw = bws[clock]; 706 intel_dp->link_bw = bws[clock];
701 intel_dp->lane_count = lane_count; 707 intel_dp->lane_count = lane_count;
@@ -768,12 +774,11 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
768 continue; 774 continue;
769 775
770 intel_dp = enc_to_intel_dp(encoder); 776 intel_dp = enc_to_intel_dp(encoder);
771 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { 777 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
778 intel_dp->base.type == INTEL_OUTPUT_EDP)
779 {
772 lane_count = intel_dp->lane_count; 780 lane_count = intel_dp->lane_count;
773 break; 781 break;
774 } else if (is_edp(intel_dp)) {
775 lane_count = dev_priv->edp.lanes;
776 break;
777 } 782 }
778 } 783 }
779 784
@@ -810,6 +815,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
810 struct drm_display_mode *adjusted_mode) 815 struct drm_display_mode *adjusted_mode)
811{ 816{
812 struct drm_device *dev = encoder->dev; 817 struct drm_device *dev = encoder->dev;
818 struct drm_i915_private *dev_priv = dev->dev_private;
813 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 819 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
814 struct drm_crtc *crtc = intel_dp->base.base.crtc; 820 struct drm_crtc *crtc = intel_dp->base.base.crtc;
815 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 821 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -822,18 +828,32 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
822 ironlake_edp_pll_off(encoder); 828 ironlake_edp_pll_off(encoder);
823 } 829 }
824 830
825 intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 831 /*
826 intel_dp->DP |= intel_dp->color_range; 832 * There are four kinds of DP registers:
833 *
834 * IBX PCH
835 * SNB CPU
836 * IVB CPU
837 * CPT PCH
838 *
839 * IBX PCH and CPU are the same for almost everything,
840 * except that the CPU DP PLL is configured in this
841 * register
842 *
843 * CPT PCH is quite different, having many bits moved
844 * to the TRANS_DP_CTL register instead. That
845 * configuration happens (oddly) in ironlake_pch_enable
846 */
847
848 /* Preserve the BIOS-computed detected bit. This is
849 * supposed to be read-only.
850 */
851 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
852 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
827 853
828 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 854 /* Handle DP bits in common between all three register formats */
829 intel_dp->DP |= DP_SYNC_HS_HIGH;
830 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
831 intel_dp->DP |= DP_SYNC_VS_HIGH;
832 855
833 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 856 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
834 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
835 else
836 intel_dp->DP |= DP_LINK_TRAIN_OFF;
837 857
838 switch (intel_dp->lane_count) { 858 switch (intel_dp->lane_count) {
839 case 1: 859 case 1:
@@ -852,59 +872,124 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
852 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 872 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
853 intel_write_eld(encoder, adjusted_mode); 873 intel_write_eld(encoder, adjusted_mode);
854 } 874 }
855
856 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 875 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
857 intel_dp->link_configuration[0] = intel_dp->link_bw; 876 intel_dp->link_configuration[0] = intel_dp->link_bw;
858 intel_dp->link_configuration[1] = intel_dp->lane_count; 877 intel_dp->link_configuration[1] = intel_dp->lane_count;
859 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; 878 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
860
861 /* 879 /*
862 * Check for DPCD version > 1.1 and enhanced framing support 880 * Check for DPCD version > 1.1 and enhanced framing support
863 */ 881 */
864 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 882 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
865 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { 883 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
866 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 884 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
867 intel_dp->DP |= DP_ENHANCED_FRAMING;
868 } 885 }
869 886
870 /* CPT DP's pipe select is decided in TRANS_DP_CTL */ 887 /* Split out the IBX/CPU vs CPT settings */
871 if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) 888
872 intel_dp->DP |= DP_PIPEB_SELECT; 889 if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
890 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
891 intel_dp->DP |= DP_SYNC_HS_HIGH;
892 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
893 intel_dp->DP |= DP_SYNC_VS_HIGH;
894 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
895
896 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
897 intel_dp->DP |= DP_ENHANCED_FRAMING;
898
899 intel_dp->DP |= intel_crtc->pipe << 29;
873 900
874 if (is_cpu_edp(intel_dp)) {
875 /* don't miss out required setting for eDP */ 901 /* don't miss out required setting for eDP */
876 intel_dp->DP |= DP_PLL_ENABLE; 902 intel_dp->DP |= DP_PLL_ENABLE;
877 if (adjusted_mode->clock < 200000) 903 if (adjusted_mode->clock < 200000)
878 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 904 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
879 else 905 else
880 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 906 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
907 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
908 intel_dp->DP |= intel_dp->color_range;
909
910 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
911 intel_dp->DP |= DP_SYNC_HS_HIGH;
912 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
913 intel_dp->DP |= DP_SYNC_VS_HIGH;
914 intel_dp->DP |= DP_LINK_TRAIN_OFF;
915
916 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
917 intel_dp->DP |= DP_ENHANCED_FRAMING;
918
919 if (intel_crtc->pipe == 1)
920 intel_dp->DP |= DP_PIPEB_SELECT;
921
922 if (is_cpu_edp(intel_dp)) {
923 /* don't miss out required setting for eDP */
924 intel_dp->DP |= DP_PLL_ENABLE;
925 if (adjusted_mode->clock < 200000)
926 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
927 else
928 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
929 }
930 } else {
931 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
881 } 932 }
882} 933}
883 934
884static void ironlake_wait_panel_off(struct intel_dp *intel_dp) 935#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
936#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
937
938#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
939#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
940
941#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
942#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
943
944static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
945 u32 mask,
946 u32 value)
885{ 947{
886 unsigned long off_time; 948 struct drm_device *dev = intel_dp->base.base.dev;
887 unsigned long delay; 949 struct drm_i915_private *dev_priv = dev->dev_private;
888 950
889 DRM_DEBUG_KMS("Wait for panel power off time\n"); 951 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
952 mask, value,
953 I915_READ(PCH_PP_STATUS),
954 I915_READ(PCH_PP_CONTROL));
890 955
891 if (ironlake_edp_have_panel_power(intel_dp) || 956 if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
892 ironlake_edp_have_panel_vdd(intel_dp)) 957 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
893 { 958 I915_READ(PCH_PP_STATUS),
894 DRM_DEBUG_KMS("Panel still on, no delay needed\n"); 959 I915_READ(PCH_PP_CONTROL));
895 return;
896 } 960 }
961}
897 962
898 off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay); 963static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
899 if (time_after(jiffies, off_time)) { 964{
900 DRM_DEBUG_KMS("Time already passed"); 965 DRM_DEBUG_KMS("Wait for panel power on\n");
901 return; 966 ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
902 } 967}
903 delay = jiffies_to_msecs(off_time - jiffies); 968
904 if (delay > intel_dp->panel_power_down_delay) 969static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
905 delay = intel_dp->panel_power_down_delay; 970{
906 DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay); 971 DRM_DEBUG_KMS("Wait for panel power off time\n");
907 msleep(delay); 972 ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
973}
974
975static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
976{
977 DRM_DEBUG_KMS("Wait for panel power cycle\n");
978 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
979}
980
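ironlake_wait_panel_status() reduces the three panel waits to one "poll PCH_PP_STATUS until (status & mask) == value, with a timeout" loop; the IDLE_*_MASK/VALUE pairs above just pick which power-sequencer fields must have settled for on, off and cycle-complete. A userspace sketch of the same polling idiom against a stubbed status register, using only the sequencer-state bits defined earlier in this patch (the real IDLE_ON check additionally requires PP_ON, and the real code polls via the driver's _wait_for() helper with a 5 second timeout):

/* Illustrative only: wait until selected status fields reach a value. */
#include <stdio.h>

static unsigned int fake_status;

static unsigned int read_status(void)
{
	/* Stand-in for I915_READ(PCH_PP_STATUS): pretend the panel
	 * power sequencer settles after a few polls. */
	static int polls;

	if (++polls > 3)
		fake_status = 0x8;	/* PP_SEQUENCE_NONE | PP_SEQUENCE_STATE_ON_IDLE */
	return fake_status;
}

static int wait_panel_status(unsigned int mask, unsigned int value, int tries)
{
	while (tries--) {
		if ((read_status() & mask) == value)
			return 0;
		/* real code sleeps between polls and times out eventually */
	}
	return -1;
}

int main(void)
{
	unsigned int mask  = (3u << 28) | 0xf;	/* PP_SEQUENCE_MASK | PP_SEQUENCE_STATE_MASK */
	unsigned int value = (0u << 28) | 0x8;	/* PP_SEQUENCE_NONE | ..._STATE_ON_IDLE */

	printf("wait result: %d\n", wait_panel_status(mask, value, 10));
	return 0;
}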
981
982/* Read the current pp_control value, unlocking the register if it
983 * is locked
984 */
985
986static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
987{
988 u32 control = I915_READ(PCH_PP_CONTROL);
989
990 control &= ~PANEL_UNLOCK_MASK;
991 control |= PANEL_UNLOCK_REGS;
992 return control;
908} 993}
909 994
910static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) 995static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
@@ -921,15 +1006,16 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
921 "eDP VDD already requested on\n"); 1006 "eDP VDD already requested on\n");
922 1007
923 intel_dp->want_panel_vdd = true; 1008 intel_dp->want_panel_vdd = true;
1009
924 if (ironlake_edp_have_panel_vdd(intel_dp)) { 1010 if (ironlake_edp_have_panel_vdd(intel_dp)) {
925 DRM_DEBUG_KMS("eDP VDD already on\n"); 1011 DRM_DEBUG_KMS("eDP VDD already on\n");
926 return; 1012 return;
927 } 1013 }
928 1014
929 ironlake_wait_panel_off(intel_dp); 1015 if (!ironlake_edp_have_panel_power(intel_dp))
930 pp = I915_READ(PCH_PP_CONTROL); 1016 ironlake_wait_panel_power_cycle(intel_dp);
931 pp &= ~PANEL_UNLOCK_MASK; 1017
932 pp |= PANEL_UNLOCK_REGS; 1018 pp = ironlake_get_pp_control(dev_priv);
933 pp |= EDP_FORCE_VDD; 1019 pp |= EDP_FORCE_VDD;
934 I915_WRITE(PCH_PP_CONTROL, pp); 1020 I915_WRITE(PCH_PP_CONTROL, pp);
935 POSTING_READ(PCH_PP_CONTROL); 1021 POSTING_READ(PCH_PP_CONTROL);
@@ -952,9 +1038,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
952 u32 pp; 1038 u32 pp;
953 1039
954 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1040 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
955 pp = I915_READ(PCH_PP_CONTROL); 1041 pp = ironlake_get_pp_control(dev_priv);
956 pp &= ~PANEL_UNLOCK_MASK;
957 pp |= PANEL_UNLOCK_REGS;
958 pp &= ~EDP_FORCE_VDD; 1042 pp &= ~EDP_FORCE_VDD;
959 I915_WRITE(PCH_PP_CONTROL, pp); 1043 I915_WRITE(PCH_PP_CONTROL, pp);
960 POSTING_READ(PCH_PP_CONTROL); 1044 POSTING_READ(PCH_PP_CONTROL);
@@ -962,7 +1046,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
962 /* Make sure sequencer is idle before allowing subsequent activity */ 1046 /* Make sure sequencer is idle before allowing subsequent activity */
963 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", 1047 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
964 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); 1048 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
965 intel_dp->panel_off_jiffies = jiffies; 1049
1050 msleep(intel_dp->panel_power_down_delay);
966 } 1051 }
967} 1052}
968 1053
@@ -972,9 +1057,9 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
972 struct intel_dp, panel_vdd_work); 1057 struct intel_dp, panel_vdd_work);
973 struct drm_device *dev = intel_dp->base.base.dev; 1058 struct drm_device *dev = intel_dp->base.base.dev;
974 1059
975 mutex_lock(&dev->struct_mutex); 1060 mutex_lock(&dev->mode_config.mutex);
976 ironlake_panel_vdd_off_sync(intel_dp); 1061 ironlake_panel_vdd_off_sync(intel_dp);
977 mutex_unlock(&dev->struct_mutex); 1062 mutex_unlock(&dev->mode_config.mutex);
978} 1063}
979 1064
980static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1065static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
@@ -984,7 +1069,7 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
984 1069
985 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); 1070 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
986 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); 1071 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
987 1072
988 intel_dp->want_panel_vdd = false; 1073 intel_dp->want_panel_vdd = false;
989 1074
990 if (sync) { 1075 if (sync) {
@@ -1000,23 +1085,25 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1000 } 1085 }
1001} 1086}
1002 1087
1003/* Returns true if the panel was already on when called */
1004static void ironlake_edp_panel_on(struct intel_dp *intel_dp) 1088static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1005{ 1089{
1006 struct drm_device *dev = intel_dp->base.base.dev; 1090 struct drm_device *dev = intel_dp->base.base.dev;
1007 struct drm_i915_private *dev_priv = dev->dev_private; 1091 struct drm_i915_private *dev_priv = dev->dev_private;
1008 u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; 1092 u32 pp;
1009 1093
1010 if (!is_edp(intel_dp)) 1094 if (!is_edp(intel_dp))
1011 return; 1095 return;
1012 if (ironlake_edp_have_panel_power(intel_dp)) 1096
1097 DRM_DEBUG_KMS("Turn eDP power on\n");
1098
1099 if (ironlake_edp_have_panel_power(intel_dp)) {
1100 DRM_DEBUG_KMS("eDP power already on\n");
1013 return; 1101 return;
1102 }
1014 1103
1015 ironlake_wait_panel_off(intel_dp); 1104 ironlake_wait_panel_power_cycle(intel_dp);
1016 pp = I915_READ(PCH_PP_CONTROL);
1017 pp &= ~PANEL_UNLOCK_MASK;
1018 pp |= PANEL_UNLOCK_REGS;
1019 1105
1106 pp = ironlake_get_pp_control(dev_priv);
1020 if (IS_GEN5(dev)) { 1107 if (IS_GEN5(dev)) {
1021 /* ILK workaround: disable reset around power sequence */ 1108 /* ILK workaround: disable reset around power sequence */
1022 pp &= ~PANEL_POWER_RESET; 1109 pp &= ~PANEL_POWER_RESET;
@@ -1025,13 +1112,13 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1025 } 1112 }
1026 1113
1027 pp |= POWER_TARGET_ON; 1114 pp |= POWER_TARGET_ON;
1115 if (!IS_GEN5(dev))
1116 pp |= PANEL_POWER_RESET;
1117
1028 I915_WRITE(PCH_PP_CONTROL, pp); 1118 I915_WRITE(PCH_PP_CONTROL, pp);
1029 POSTING_READ(PCH_PP_CONTROL); 1119 POSTING_READ(PCH_PP_CONTROL);
1030 1120
1031 if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, 1121 ironlake_wait_panel_on(intel_dp);
1032 5000))
1033 DRM_ERROR("panel on wait timed out: 0x%08x\n",
1034 I915_READ(PCH_PP_STATUS));
1035 1122
1036 if (IS_GEN5(dev)) { 1123 if (IS_GEN5(dev)) {
1037 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1124 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@@ -1040,46 +1127,25 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1040 } 1127 }
1041} 1128}
1042 1129
1043static void ironlake_edp_panel_off(struct drm_encoder *encoder) 1130static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1044{ 1131{
1045 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1132 struct drm_device *dev = intel_dp->base.base.dev;
1046 struct drm_device *dev = encoder->dev;
1047 struct drm_i915_private *dev_priv = dev->dev_private; 1133 struct drm_i915_private *dev_priv = dev->dev_private;
1048 u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | 1134 u32 pp;
1049 PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
1050 1135
1051 if (!is_edp(intel_dp)) 1136 if (!is_edp(intel_dp))
1052 return; 1137 return;
1053 pp = I915_READ(PCH_PP_CONTROL);
1054 pp &= ~PANEL_UNLOCK_MASK;
1055 pp |= PANEL_UNLOCK_REGS;
1056 1138
1057 if (IS_GEN5(dev)) { 1139 DRM_DEBUG_KMS("Turn eDP power off\n");
1058 /* ILK workaround: disable reset around power sequence */
1059 pp &= ~PANEL_POWER_RESET;
1060 I915_WRITE(PCH_PP_CONTROL, pp);
1061 POSTING_READ(PCH_PP_CONTROL);
1062 }
1063 1140
1064 intel_dp->panel_off_jiffies = jiffies; 1141 WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
1065 1142
1066 if (IS_GEN5(dev)) { 1143 pp = ironlake_get_pp_control(dev_priv);
1067 pp &= ~POWER_TARGET_ON; 1144 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
1068 I915_WRITE(PCH_PP_CONTROL, pp); 1145 I915_WRITE(PCH_PP_CONTROL, pp);
1069 POSTING_READ(PCH_PP_CONTROL); 1146 POSTING_READ(PCH_PP_CONTROL);
1070 pp &= ~POWER_TARGET_ON;
1071 I915_WRITE(PCH_PP_CONTROL, pp);
1072 POSTING_READ(PCH_PP_CONTROL);
1073 msleep(intel_dp->panel_power_cycle_delay);
1074
1075 if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
1076 DRM_ERROR("panel off wait timed out: 0x%08x\n",
1077 I915_READ(PCH_PP_STATUS));
1078 1147
1079 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1148 ironlake_wait_panel_off(intel_dp);
1080 I915_WRITE(PCH_PP_CONTROL, pp);
1081 POSTING_READ(PCH_PP_CONTROL);
1082 }
1083} 1149}
1084 1150
1085static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) 1151static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1099,9 +1165,7 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1099 * allowing it to appear. 1165 * allowing it to appear.
1100 */ 1166 */
1101 msleep(intel_dp->backlight_on_delay); 1167 msleep(intel_dp->backlight_on_delay);
1102 pp = I915_READ(PCH_PP_CONTROL); 1168 pp = ironlake_get_pp_control(dev_priv);
1103 pp &= ~PANEL_UNLOCK_MASK;
1104 pp |= PANEL_UNLOCK_REGS;
1105 pp |= EDP_BLC_ENABLE; 1169 pp |= EDP_BLC_ENABLE;
1106 I915_WRITE(PCH_PP_CONTROL, pp); 1170 I915_WRITE(PCH_PP_CONTROL, pp);
1107 POSTING_READ(PCH_PP_CONTROL); 1171 POSTING_READ(PCH_PP_CONTROL);
@@ -1117,9 +1181,7 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1117 return; 1181 return;
1118 1182
1119 DRM_DEBUG_KMS("\n"); 1183 DRM_DEBUG_KMS("\n");
1120 pp = I915_READ(PCH_PP_CONTROL); 1184 pp = ironlake_get_pp_control(dev_priv);
1121 pp &= ~PANEL_UNLOCK_MASK;
1122 pp |= PANEL_UNLOCK_REGS;
1123 pp &= ~EDP_BLC_ENABLE; 1185 pp &= ~EDP_BLC_ENABLE;
1124 I915_WRITE(PCH_PP_CONTROL, pp); 1186 I915_WRITE(PCH_PP_CONTROL, pp);
1125 POSTING_READ(PCH_PP_CONTROL); 1187 POSTING_READ(PCH_PP_CONTROL);
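
The hunks above replace the repeated "read PCH_PP_CONTROL, clear the unlock mask, insert the unlock key" sequence with a single ironlake_get_pp_control() helper. Below is a minimal standalone sketch of that read-modify pattern; the fake_read_pp_control() stub and the mask/key values are illustrative placeholders (the real PANEL_UNLOCK_* definitions live in i915_reg.h), not the driver's code.

#include <stdio.h>
#include <stdint.h>

/* Illustrative placeholders for the register write-protect key. */
#define PANEL_UNLOCK_MASK  (0xffffu << 16)
#define PANEL_UNLOCK_REGS  (0xabcdu << 16)

/* Stub for I915_READ(PCH_PP_CONTROL) so the sketch runs without hardware. */
static uint32_t fake_read_pp_control(void)
{
	return 0x00000003;	/* pretend some power bits are already set */
}

/*
 * Mirrors the idea of ironlake_get_pp_control(): every caller gets the
 * current control value with the write-protect key already inserted, so the
 * unlock boilerplate is not repeated at each call site.
 */
static uint32_t get_pp_control(void)
{
	uint32_t pp = fake_read_pp_control();

	pp &= ~PANEL_UNLOCK_MASK;	/* drop whatever key the register holds */
	pp |= PANEL_UNLOCK_REGS;	/* insert the unlock key for the next write */
	return pp;
}

int main(void)
{
	printf("pp control to write back: 0x%08x\n", (unsigned)get_pp_control());
	return 0;
}
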
@@ -1187,17 +1249,18 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
1187{ 1249{
1188 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1250 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1189 1251
1252 ironlake_edp_backlight_off(intel_dp);
1253 ironlake_edp_panel_off(intel_dp);
1254
1190 /* Wake up the sink first */ 1255 /* Wake up the sink first */
1191 ironlake_edp_panel_vdd_on(intel_dp); 1256 ironlake_edp_panel_vdd_on(intel_dp);
1192 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1257 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1258 intel_dp_link_down(intel_dp);
1193 ironlake_edp_panel_vdd_off(intel_dp, false); 1259 ironlake_edp_panel_vdd_off(intel_dp, false);
1194 1260
1195 /* Make sure the panel is off before trying to 1261 /* Make sure the panel is off before trying to
1196 * change the mode 1262 * change the mode
1197 */ 1263 */
1198 ironlake_edp_backlight_off(intel_dp);
1199 intel_dp_link_down(intel_dp);
1200 ironlake_edp_panel_off(encoder);
1201} 1264}
1202 1265
1203static void intel_dp_commit(struct drm_encoder *encoder) 1266static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1211,7 +1274,6 @@ static void intel_dp_commit(struct drm_encoder *encoder)
1211 intel_dp_start_link_train(intel_dp); 1274 intel_dp_start_link_train(intel_dp);
1212 ironlake_edp_panel_on(intel_dp); 1275 ironlake_edp_panel_on(intel_dp);
1213 ironlake_edp_panel_vdd_off(intel_dp, true); 1276 ironlake_edp_panel_vdd_off(intel_dp, true);
1214
1215 intel_dp_complete_link_train(intel_dp); 1277 intel_dp_complete_link_train(intel_dp);
1216 ironlake_edp_backlight_on(intel_dp); 1278 ironlake_edp_backlight_on(intel_dp);
1217 1279
@@ -1230,16 +1292,20 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
1230 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 1292 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
1231 1293
1232 if (mode != DRM_MODE_DPMS_ON) { 1294 if (mode != DRM_MODE_DPMS_ON) {
1295 ironlake_edp_backlight_off(intel_dp);
1296 ironlake_edp_panel_off(intel_dp);
1297
1233 ironlake_edp_panel_vdd_on(intel_dp); 1298 ironlake_edp_panel_vdd_on(intel_dp);
1234 if (is_edp(intel_dp))
1235 ironlake_edp_backlight_off(intel_dp);
1236 intel_dp_sink_dpms(intel_dp, mode); 1299 intel_dp_sink_dpms(intel_dp, mode);
1237 intel_dp_link_down(intel_dp); 1300 intel_dp_link_down(intel_dp);
1238 ironlake_edp_panel_off(encoder);
1239 if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
1240 ironlake_edp_pll_off(encoder);
1241 ironlake_edp_panel_vdd_off(intel_dp, false); 1301 ironlake_edp_panel_vdd_off(intel_dp, false);
1302
1303 if (is_cpu_edp(intel_dp))
1304 ironlake_edp_pll_off(encoder);
1242 } else { 1305 } else {
1306 if (is_cpu_edp(intel_dp))
1307 ironlake_edp_pll_on(encoder);
1308
1243 ironlake_edp_panel_vdd_on(intel_dp); 1309 ironlake_edp_panel_vdd_on(intel_dp);
1244 intel_dp_sink_dpms(intel_dp, mode); 1310 intel_dp_sink_dpms(intel_dp, mode);
1245 if (!(dp_reg & DP_PORT_EN)) { 1311 if (!(dp_reg & DP_PORT_EN)) {
@@ -1247,7 +1313,6 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
1247 ironlake_edp_panel_on(intel_dp); 1313 ironlake_edp_panel_on(intel_dp);
1248 ironlake_edp_panel_vdd_off(intel_dp, true); 1314 ironlake_edp_panel_vdd_off(intel_dp, true);
1249 intel_dp_complete_link_train(intel_dp); 1315 intel_dp_complete_link_train(intel_dp);
1250 ironlake_edp_backlight_on(intel_dp);
1251 } else 1316 } else
1252 ironlake_edp_panel_vdd_off(intel_dp, false); 1317 ironlake_edp_panel_vdd_off(intel_dp, false);
1253 ironlake_edp_backlight_on(intel_dp); 1318 ironlake_edp_backlight_on(intel_dp);
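
The reordered intel_dp_prepare()/intel_dp_dpms() paths above turn the backlight and panel power off before VDD is forced on for AUX traffic, and only toggle the eDP PLL on CPU eDP ports. A sketch of the resulting ordering with every step stubbed out as a log line; none of these helpers are the driver's, the sequence itself is the point.

#include <stdio.h>
#include <stdbool.h>

/* Every step below is a stand-in that just logs; only the order matters. */
static void step(const char *what) { printf("  %s\n", what); }

/* Power-down ordering used by the reworked DPMS-off path above. */
static void edp_power_down(bool is_cpu_edp)
{
	step("backlight off");
	step("panel power off");
	step("force VDD on (AUX still needs power)");
	step("sink DPMS -> off");
	step("link down");
	step("release VDD");
	if (is_cpu_edp)
		step("eDP PLL off");
}

/* Power-up ordering used by the DPMS-on path above. */
static void edp_power_up(bool is_cpu_edp)
{
	if (is_cpu_edp)
		step("eDP PLL on");
	step("force VDD on");
	step("sink DPMS -> on");
	step("start link training");
	step("panel power on");
	step("release VDD");
	step("complete link training");
	step("backlight on");
}

int main(void)
{
	printf("off:\n");  edp_power_down(true);
	printf("on:\n");   edp_power_up(true);
	return 0;
}
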
@@ -1285,11 +1350,11 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1285 * link status information 1350 * link status information
1286 */ 1351 */
1287static bool 1352static bool
1288intel_dp_get_link_status(struct intel_dp *intel_dp) 1353intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1289{ 1354{
1290 return intel_dp_aux_native_read_retry(intel_dp, 1355 return intel_dp_aux_native_read_retry(intel_dp,
1291 DP_LANE0_1_STATUS, 1356 DP_LANE0_1_STATUS,
1292 intel_dp->link_status, 1357 link_status,
1293 DP_LINK_STATUS_SIZE); 1358 DP_LINK_STATUS_SIZE);
1294} 1359}
1295 1360
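
intel_dp_get_link_status() above now fills a caller-provided buffer instead of a field in struct intel_dp, and the adjust-request helpers that follow index into that buffer two lanes per byte. A self-contained sketch of how the per-lane voltage-swing and pre-emphasis requests are unpacked; the shift constants are defined locally as assumed DPCD layout, purely for illustration.

#include <stdio.h>
#include <stdint.h>

/* Assumed DPCD layout: two lanes per ADJUST_REQUEST byte, bits 1:0 voltage
 * swing for lane N, bits 3:2 pre-emphasis for lane N, bits 5:4 and 7:6 the
 * same fields for lane N+1. */
#define ADJ_VSWING_LANE0_SHIFT   0
#define ADJ_PREEMPH_LANE0_SHIFT  2
#define ADJ_VSWING_LANE1_SHIFT   4
#define ADJ_PREEMPH_LANE1_SHIFT  6

static unsigned adjust_request_vswing(const uint8_t adjust_request[2], int lane)
{
	int shift = (lane & 1) ? ADJ_VSWING_LANE1_SHIFT : ADJ_VSWING_LANE0_SHIFT;
	return (adjust_request[lane >> 1] >> shift) & 3;   /* 0..3 -> 400..1200 mV */
}

static unsigned adjust_request_preemph(const uint8_t adjust_request[2], int lane)
{
	int shift = (lane & 1) ? ADJ_PREEMPH_LANE1_SHIFT : ADJ_PREEMPH_LANE0_SHIFT;
	return (adjust_request[lane >> 1] >> shift) & 3;   /* 0..3 -> 0..9.5 dB */
}

int main(void)
{
	/* Example: lanes 0..3 asking for swing 1,2,0,3 and pre-emphasis 0,1,2,0. */
	uint8_t adj[2] = { 0x61, 0x38 };

	for (int lane = 0; lane < 4; lane++)
		printf("lane %d: vswing level %u, pre-emphasis level %u\n",
		       lane, adjust_request_vswing(adj, lane),
		       adjust_request_preemph(adj, lane));
	return 0;
}
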
@@ -1301,27 +1366,25 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1301} 1366}
1302 1367
1303static uint8_t 1368static uint8_t
1304intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], 1369intel_get_adjust_request_voltage(uint8_t adjust_request[2],
1305 int lane) 1370 int lane)
1306{ 1371{
1307 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
1308 int s = ((lane & 1) ? 1372 int s = ((lane & 1) ?
1309 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : 1373 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
1310 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); 1374 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
1311 uint8_t l = intel_dp_link_status(link_status, i); 1375 uint8_t l = adjust_request[lane>>1];
1312 1376
1313 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; 1377 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
1314} 1378}
1315 1379
1316static uint8_t 1380static uint8_t
1317intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], 1381intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
1318 int lane) 1382 int lane)
1319{ 1383{
1320 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
1321 int s = ((lane & 1) ? 1384 int s = ((lane & 1) ?
1322 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : 1385 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
1323 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); 1386 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
1324 uint8_t l = intel_dp_link_status(link_status, i); 1387 uint8_t l = adjust_request[lane>>1];
1325 1388
1326 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; 1389 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
1327} 1390}
@@ -1343,34 +1406,63 @@ static char *link_train_names[] = {
1343 * These are source-specific values; current Intel hardware supports 1406 * These are source-specific values; current Intel hardware supports
1344 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1407 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1345 */ 1408 */
1346#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
1347 1409
1348static uint8_t 1410static uint8_t
1349intel_dp_pre_emphasis_max(uint8_t voltage_swing) 1411intel_dp_voltage_max(struct intel_dp *intel_dp)
1350{ 1412{
1351 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1413 struct drm_device *dev = intel_dp->base.base.dev;
1352 case DP_TRAIN_VOLTAGE_SWING_400: 1414
1353 return DP_TRAIN_PRE_EMPHASIS_6; 1415 if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
1354 case DP_TRAIN_VOLTAGE_SWING_600: 1416 return DP_TRAIN_VOLTAGE_SWING_800;
1355 return DP_TRAIN_PRE_EMPHASIS_6; 1417 else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
1356 case DP_TRAIN_VOLTAGE_SWING_800: 1418 return DP_TRAIN_VOLTAGE_SWING_1200;
1357 return DP_TRAIN_PRE_EMPHASIS_3_5; 1419 else
1358 case DP_TRAIN_VOLTAGE_SWING_1200: 1420 return DP_TRAIN_VOLTAGE_SWING_800;
1359 default: 1421}
1360 return DP_TRAIN_PRE_EMPHASIS_0; 1422
1423static uint8_t
1424intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1425{
1426 struct drm_device *dev = intel_dp->base.base.dev;
1427
1428 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1429 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1430 case DP_TRAIN_VOLTAGE_SWING_400:
1431 return DP_TRAIN_PRE_EMPHASIS_6;
1432 case DP_TRAIN_VOLTAGE_SWING_600:
1433 case DP_TRAIN_VOLTAGE_SWING_800:
1434 return DP_TRAIN_PRE_EMPHASIS_3_5;
1435 default:
1436 return DP_TRAIN_PRE_EMPHASIS_0;
1437 }
1438 } else {
1439 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1440 case DP_TRAIN_VOLTAGE_SWING_400:
1441 return DP_TRAIN_PRE_EMPHASIS_6;
1442 case DP_TRAIN_VOLTAGE_SWING_600:
1443 return DP_TRAIN_PRE_EMPHASIS_6;
1444 case DP_TRAIN_VOLTAGE_SWING_800:
1445 return DP_TRAIN_PRE_EMPHASIS_3_5;
1446 case DP_TRAIN_VOLTAGE_SWING_1200:
1447 default:
1448 return DP_TRAIN_PRE_EMPHASIS_0;
1449 }
1361 } 1450 }
1362} 1451}
1363 1452
1364static void 1453static void
1365intel_get_adjust_train(struct intel_dp *intel_dp) 1454intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1366{ 1455{
1367 uint8_t v = 0; 1456 uint8_t v = 0;
1368 uint8_t p = 0; 1457 uint8_t p = 0;
1369 int lane; 1458 int lane;
1459 uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
1460 uint8_t voltage_max;
1461 uint8_t preemph_max;
1370 1462
1371 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1463 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1372 uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane); 1464 uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
1373 uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); 1465 uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
1374 1466
1375 if (this_v > v) 1467 if (this_v > v)
1376 v = this_v; 1468 v = this_v;
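
intel_dp_voltage_max() and intel_dp_pre_emphasis_max() above replace the single I830_DP_VOLTAGE_MAX constant with per-platform limits, and intel_get_adjust_train() then clamps the sink's request and advertises the "max reached" flags. A compact sketch of that clamp step with the limits passed in as plain numbers; the bit positions are local stand-ins for the DPCD training-field encoding, not the driver's macros.

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the DPCD training field encoding. */
#define VSWING_MASK           0x03
#define MAX_SWING_REACHED     0x04
#define PREEMPH_SHIFT         3
#define MAX_PREEMPH_REACHED   (0x01 << 5)

/*
 * Clamp the highest per-lane request to the source's limits and set the
 * "max reached" bits so the sink stops asking for more, mirroring the tail
 * of intel_get_adjust_train() above.
 */
static uint8_t clamp_train_set(unsigned v_req, unsigned p_req,
			       unsigned v_max, unsigned p_max)
{
	unsigned v = v_req;
	unsigned p = p_req << PREEMPH_SHIFT;

	if (v >= v_max)
		v = v_max | MAX_SWING_REACHED;
	if (p >= (p_max << PREEMPH_SHIFT))
		p = (p_max << PREEMPH_SHIFT) | MAX_PREEMPH_REACHED;

	return (uint8_t)(v | p);
}

int main(void)
{
	/* e.g. a source capped at swing level 2 (800 mV) with at most
	 * pre-emphasis level 1 (3.5 dB) for that swing. */
	uint8_t ts = clamp_train_set(3 /* sink wants 1200 mV */,
				     2 /* sink wants 6 dB    */,
				     2 /* source max swing   */,
				     1 /* source max emphasis */);
	printf("train_set byte: 0x%02x\n", (unsigned)ts);
	return 0;
}
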
@@ -1378,18 +1470,20 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
1378 p = this_p; 1470 p = this_p;
1379 } 1471 }
1380 1472
1381 if (v >= I830_DP_VOLTAGE_MAX) 1473 voltage_max = intel_dp_voltage_max(intel_dp);
1382 v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; 1474 if (v >= voltage_max)
1475 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
1383 1476
1384 if (p >= intel_dp_pre_emphasis_max(v)) 1477 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
1385 p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 1478 if (p >= preemph_max)
1479 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1386 1480
1387 for (lane = 0; lane < 4; lane++) 1481 for (lane = 0; lane < 4; lane++)
1388 intel_dp->train_set[lane] = v | p; 1482 intel_dp->train_set[lane] = v | p;
1389} 1483}
1390 1484
1391static uint32_t 1485static uint32_t
1392intel_dp_signal_levels(uint8_t train_set, int lane_count) 1486intel_dp_signal_levels(uint8_t train_set)
1393{ 1487{
1394 uint32_t signal_levels = 0; 1488 uint32_t signal_levels = 0;
1395 1489
@@ -1454,13 +1548,43 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
1454 } 1548 }
1455} 1549}
1456 1550
1551/* Gen7's DP voltage swing and pre-emphasis control */
1552static uint32_t
1553intel_gen7_edp_signal_levels(uint8_t train_set)
1554{
1555 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1556 DP_TRAIN_PRE_EMPHASIS_MASK);
1557 switch (signal_levels) {
1558 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1559 return EDP_LINK_TRAIN_400MV_0DB_IVB;
1560 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1561 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
1562 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1563 return EDP_LINK_TRAIN_400MV_6DB_IVB;
1564
1565 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1566 return EDP_LINK_TRAIN_600MV_0DB_IVB;
1567 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1568 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
1569
1570 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1571 return EDP_LINK_TRAIN_800MV_0DB_IVB;
1572 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1573 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
1574
1575 default:
1576 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1577 "0x%x\n", signal_levels);
1578 return EDP_LINK_TRAIN_500MV_0DB_IVB;
1579 }
1580}
1581
1457static uint8_t 1582static uint8_t
1458intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], 1583intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1459 int lane) 1584 int lane)
1460{ 1585{
1461 int i = DP_LANE0_1_STATUS + (lane >> 1);
1462 int s = (lane & 1) * 4; 1586 int s = (lane & 1) * 4;
1463 uint8_t l = intel_dp_link_status(link_status, i); 1587 uint8_t l = link_status[lane>>1];
1464 1588
1465 return (l >> s) & 0xf; 1589 return (l >> s) & 0xf;
1466} 1590}
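
intel_get_lane_status() above now indexes the local link_status buffer directly: each LANE0_1/LANE2_3 status byte carries two lanes, four bits per lane, and channel EQ is considered good only when the CR, EQ and symbol-lock bits are all set. A tiny sketch of that unpacking with the bit names defined locally as assumptions.

#include <stdio.h>
#include <stdint.h>

/* Per-lane status bits inside each 4-bit nibble (assumed DP spec layout). */
#define LANE_CR_DONE          (1u << 0)
#define LANE_CHANNEL_EQ_DONE  (1u << 1)
#define LANE_SYMBOL_LOCKED    (1u << 2)
#define CHANNEL_EQ_BITS (LANE_CR_DONE | LANE_CHANNEL_EQ_DONE | LANE_SYMBOL_LOCKED)

static unsigned lane_status(const uint8_t link_status[2], int lane)
{
	int shift = (lane & 1) * 4;		/* low or high nibble */
	return (link_status[lane >> 1] >> shift) & 0xf;
}

int main(void)
{
	/* lanes 0..3: 0x7 = CR + EQ + symbol lock, 0x1 = CR only, then 0x7, 0x7 */
	uint8_t ls[2] = { 0x17, 0x77 };

	for (int lane = 0; lane < 4; lane++) {
		unsigned s = lane_status(ls, lane);
		printf("lane %d: 0x%x%s\n", lane, s,
		       (s & CHANNEL_EQ_BITS) == CHANNEL_EQ_BITS
		       ? "  (channel EQ ok)" : "");
	}
	return 0;
}
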
@@ -1485,18 +1609,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
1485 DP_LANE_CHANNEL_EQ_DONE|\ 1609 DP_LANE_CHANNEL_EQ_DONE|\
1486 DP_LANE_SYMBOL_LOCKED) 1610 DP_LANE_SYMBOL_LOCKED)
1487static bool 1611static bool
1488intel_channel_eq_ok(struct intel_dp *intel_dp) 1612intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1489{ 1613{
1490 uint8_t lane_align; 1614 uint8_t lane_align;
1491 uint8_t lane_status; 1615 uint8_t lane_status;
1492 int lane; 1616 int lane;
1493 1617
1494 lane_align = intel_dp_link_status(intel_dp->link_status, 1618 lane_align = intel_dp_link_status(link_status,
1495 DP_LANE_ALIGN_STATUS_UPDATED); 1619 DP_LANE_ALIGN_STATUS_UPDATED);
1496 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) 1620 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
1497 return false; 1621 return false;
1498 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1622 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1499 lane_status = intel_get_lane_status(intel_dp->link_status, lane); 1623 lane_status = intel_get_lane_status(link_status, lane);
1500 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) 1624 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
1501 return false; 1625 return false;
1502 } 1626 }
@@ -1521,8 +1645,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1521 1645
1522 ret = intel_dp_aux_native_write(intel_dp, 1646 ret = intel_dp_aux_native_write(intel_dp,
1523 DP_TRAINING_LANE0_SET, 1647 DP_TRAINING_LANE0_SET,
1524 intel_dp->train_set, 4); 1648 intel_dp->train_set,
1525 if (ret != 4) 1649 intel_dp->lane_count);
1650 if (ret != intel_dp->lane_count)
1526 return false; 1651 return false;
1527 1652
1528 return true; 1653 return true;
@@ -1538,7 +1663,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1538 int i; 1663 int i;
1539 uint8_t voltage; 1664 uint8_t voltage;
1540 bool clock_recovery = false; 1665 bool clock_recovery = false;
1541 int tries; 1666 int voltage_tries, loop_tries;
1542 u32 reg; 1667 u32 reg;
1543 uint32_t DP = intel_dp->DP; 1668 uint32_t DP = intel_dp->DP;
1544 1669
@@ -1559,26 +1684,35 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1559 DP_LINK_CONFIGURATION_SIZE); 1684 DP_LINK_CONFIGURATION_SIZE);
1560 1685
1561 DP |= DP_PORT_EN; 1686 DP |= DP_PORT_EN;
1562 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1687
1688 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1563 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1689 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1564 else 1690 else
1565 DP &= ~DP_LINK_TRAIN_MASK; 1691 DP &= ~DP_LINK_TRAIN_MASK;
1566 memset(intel_dp->train_set, 0, 4); 1692 memset(intel_dp->train_set, 0, 4);
1567 voltage = 0xff; 1693 voltage = 0xff;
1568 tries = 0; 1694 voltage_tries = 0;
1695 loop_tries = 0;
1569 clock_recovery = false; 1696 clock_recovery = false;
1570 for (;;) { 1697 for (;;) {
1571 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1698 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1699 uint8_t link_status[DP_LINK_STATUS_SIZE];
1572 uint32_t signal_levels; 1700 uint32_t signal_levels;
1573 if (IS_GEN6(dev) && is_edp(intel_dp)) { 1701
1702
1703 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1704 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1705 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1706 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1574 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1707 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1575 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1708 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1576 } else { 1709 } else {
1577 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); 1710 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1711 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
1578 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1712 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1579 } 1713 }
1580 1714
1581 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1715 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1582 reg = DP | DP_LINK_TRAIN_PAT_1_CPT; 1716 reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
1583 else 1717 else
1584 reg = DP | DP_LINK_TRAIN_PAT_1; 1718 reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1590,10 +1724,13 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1590 /* Set training pattern 1 */ 1724 /* Set training pattern 1 */
1591 1725
1592 udelay(100); 1726 udelay(100);
1593 if (!intel_dp_get_link_status(intel_dp)) 1727 if (!intel_dp_get_link_status(intel_dp, link_status)) {
1728 DRM_ERROR("failed to get link status\n");
1594 break; 1729 break;
1730 }
1595 1731
1596 if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { 1732 if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1733 DRM_DEBUG_KMS("clock recovery OK\n");
1597 clock_recovery = true; 1734 clock_recovery = true;
1598 break; 1735 break;
1599 } 1736 }
@@ -1602,20 +1739,30 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1602 for (i = 0; i < intel_dp->lane_count; i++) 1739 for (i = 0; i < intel_dp->lane_count; i++)
1603 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1740 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1604 break; 1741 break;
1605 if (i == intel_dp->lane_count) 1742 if (i == intel_dp->lane_count) {
1606 break; 1743 ++loop_tries;
1744 if (loop_tries == 5) {
1745 DRM_DEBUG_KMS("too many full retries, give up\n");
1746 break;
1747 }
1748 memset(intel_dp->train_set, 0, 4);
1749 voltage_tries = 0;
1750 continue;
1751 }
1607 1752
1608 /* Check to see if we've tried the same voltage 5 times */ 1753 /* Check to see if we've tried the same voltage 5 times */
1609 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1754 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
1610 ++tries; 1755 ++voltage_tries;
1611 if (tries == 5) 1756 if (voltage_tries == 5) {
1757 DRM_DEBUG_KMS("too many voltage retries, give up\n");
1612 break; 1758 break;
1759 }
1613 } else 1760 } else
1614 tries = 0; 1761 voltage_tries = 0;
1615 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1762 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1616 1763
1617 /* Compute new intel_dp->train_set as requested by target */ 1764 /* Compute new intel_dp->train_set as requested by target */
1618 intel_get_adjust_train(intel_dp); 1765 intel_get_adjust_train(intel_dp, link_status);
1619 } 1766 }
1620 1767
1621 intel_dp->DP = DP; 1768 intel_dp->DP = DP;
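
The clock-recovery loop above now keeps two counters: voltage_tries (the sink asked for the same voltage five times in a row) and loop_tries (every lane hit max swing, so the training set is reset and the whole pass retried, at most five times). A stripped-down sketch of that control flow with the hardware interaction replaced by a scripted "sink"; the struct and its field values are invented purely to exercise the retry logic.

#include <stdio.h>
#include <stdbool.h>

/* Scripted sink responses, one per training attempt. */
struct attempt {
	bool cr_ok;		/* clock recovery succeeded this attempt */
	bool at_max_swing;	/* every lane already at max voltage swing */
	bool same_voltage;	/* sink asked for the same voltage as last time */
};

static bool clock_recovery(const struct attempt *a, int n)
{
	int voltage_tries = 0, loop_tries = 0;

	for (int i = 0; i < n; i++) {
		if (a[i].cr_ok)
			return true;		/* done */

		if (a[i].at_max_swing) {
			if (++loop_tries == 5)
				return false;	/* too many full restarts */
			voltage_tries = 0;	/* wipe the training set, retry */
			continue;
		}

		if (a[i].same_voltage) {
			if (++voltage_tries == 5)
				return false;	/* stuck at one voltage */
		} else {
			voltage_tries = 0;
		}
		/* ...recompute drive levels from the sink's adjust request... */
	}
	return false;
}

int main(void)
{
	struct attempt script[] = {
		{ false, false, false },
		{ false, false, true  },
		{ false, true,  false },	/* all lanes maxed -> full restart */
		{ false, false, false },
		{ true,  false, false },	/* fifth attempt locks */
	};
	printf("clock recovery %s\n",
	       clock_recovery(script, 5) ? "succeeded" : "failed");
	return 0;
}
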
@@ -1638,6 +1785,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1638 for (;;) { 1785 for (;;) {
1639 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1786 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1640 uint32_t signal_levels; 1787 uint32_t signal_levels;
1788 uint8_t link_status[DP_LINK_STATUS_SIZE];
1641 1789
1642 if (cr_tries > 5) { 1790 if (cr_tries > 5) {
1643 DRM_ERROR("failed to train DP, aborting\n"); 1791 DRM_ERROR("failed to train DP, aborting\n");
@@ -1645,15 +1793,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1645 break; 1793 break;
1646 } 1794 }
1647 1795
1648 if (IS_GEN6(dev) && is_edp(intel_dp)) { 1796 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1797 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1798 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1799 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1649 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1800 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1650 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1801 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1651 } else { 1802 } else {
1652 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); 1803 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1653 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1804 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1654 } 1805 }
1655 1806
1656 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1807 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1657 reg = DP | DP_LINK_TRAIN_PAT_2_CPT; 1808 reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
1658 else 1809 else
1659 reg = DP | DP_LINK_TRAIN_PAT_2; 1810 reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1665,17 +1816,17 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1665 break; 1816 break;
1666 1817
1667 udelay(400); 1818 udelay(400);
1668 if (!intel_dp_get_link_status(intel_dp)) 1819 if (!intel_dp_get_link_status(intel_dp, link_status))
1669 break; 1820 break;
1670 1821
1671 /* Make sure clock is still ok */ 1822 /* Make sure clock is still ok */
1672 if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { 1823 if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1673 intel_dp_start_link_train(intel_dp); 1824 intel_dp_start_link_train(intel_dp);
1674 cr_tries++; 1825 cr_tries++;
1675 continue; 1826 continue;
1676 } 1827 }
1677 1828
1678 if (intel_channel_eq_ok(intel_dp)) { 1829 if (intel_channel_eq_ok(intel_dp, link_status)) {
1679 channel_eq = true; 1830 channel_eq = true;
1680 break; 1831 break;
1681 } 1832 }
@@ -1690,11 +1841,11 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1690 } 1841 }
1691 1842
1692 /* Compute new intel_dp->train_set as requested by target */ 1843 /* Compute new intel_dp->train_set as requested by target */
1693 intel_get_adjust_train(intel_dp); 1844 intel_get_adjust_train(intel_dp, link_status);
1694 ++tries; 1845 ++tries;
1695 } 1846 }
1696 1847
1697 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1848 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1698 reg = DP | DP_LINK_TRAIN_OFF_CPT; 1849 reg = DP | DP_LINK_TRAIN_OFF_CPT;
1699 else 1850 else
1700 reg = DP | DP_LINK_TRAIN_OFF; 1851 reg = DP | DP_LINK_TRAIN_OFF;
@@ -1724,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1724 udelay(100); 1875 udelay(100);
1725 } 1876 }
1726 1877
1727 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) { 1878 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
1728 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1879 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1729 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 1880 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
1730 } else { 1881 } else {
@@ -1735,8 +1886,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1735 1886
1736 msleep(17); 1887 msleep(17);
1737 1888
1738 if (is_edp(intel_dp)) 1889 if (is_edp(intel_dp)) {
1739 DP |= DP_LINK_TRAIN_OFF; 1890 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1891 DP |= DP_LINK_TRAIN_OFF_CPT;
1892 else
1893 DP |= DP_LINK_TRAIN_OFF;
1894 }
1740 1895
1741 if (!HAS_PCH_CPT(dev) && 1896 if (!HAS_PCH_CPT(dev) &&
1742 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 1897 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -1822,6 +1977,7 @@ static void
1822intel_dp_check_link_status(struct intel_dp *intel_dp) 1977intel_dp_check_link_status(struct intel_dp *intel_dp)
1823{ 1978{
1824 u8 sink_irq_vector; 1979 u8 sink_irq_vector;
1980 u8 link_status[DP_LINK_STATUS_SIZE];
1825 1981
1826 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) 1982 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
1827 return; 1983 return;
@@ -1830,7 +1986,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
1830 return; 1986 return;
1831 1987
1832 /* Try to read receiver status if the link appears to be up */ 1988 /* Try to read receiver status if the link appears to be up */
1833 if (!intel_dp_get_link_status(intel_dp)) { 1989 if (!intel_dp_get_link_status(intel_dp, link_status)) {
1834 intel_dp_link_down(intel_dp); 1990 intel_dp_link_down(intel_dp);
1835 return; 1991 return;
1836 } 1992 }
@@ -1855,7 +2011,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
1855 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 2011 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
1856 } 2012 }
1857 2013
1858 if (!intel_channel_eq_ok(intel_dp)) { 2014 if (!intel_channel_eq_ok(intel_dp, link_status)) {
1859 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 2015 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
1860 drm_get_encoder_name(&intel_dp->base.base)); 2016 drm_get_encoder_name(&intel_dp->base.base));
1861 intel_dp_start_link_train(intel_dp); 2017 intel_dp_start_link_train(intel_dp);
@@ -2179,7 +2335,8 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
2179 continue; 2335 continue;
2180 2336
2181 intel_dp = enc_to_intel_dp(encoder); 2337 intel_dp = enc_to_intel_dp(encoder);
2182 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) 2338 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
2339 intel_dp->base.type == INTEL_OUTPUT_EDP)
2183 return intel_dp->output_reg; 2340 return intel_dp->output_reg;
2184 } 2341 }
2185 2342
@@ -2321,7 +2478,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2321 2478
2322 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 2479 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
2323 PANEL_LIGHT_ON_DELAY_SHIFT; 2480 PANEL_LIGHT_ON_DELAY_SHIFT;
2324 2481
2325 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 2482 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
2326 PANEL_LIGHT_OFF_DELAY_SHIFT; 2483 PANEL_LIGHT_OFF_DELAY_SHIFT;
2327 2484
@@ -2354,11 +2511,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2354 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2511 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2355 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2512 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2356 2513
2357 intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay;
2358
2359 ironlake_edp_panel_vdd_on(intel_dp); 2514 ironlake_edp_panel_vdd_on(intel_dp);
2360 ret = intel_dp_get_dpcd(intel_dp); 2515 ret = intel_dp_get_dpcd(intel_dp);
2361 ironlake_edp_panel_vdd_off(intel_dp, false); 2516 ironlake_edp_panel_vdd_off(intel_dp, false);
2517
2362 if (ret) { 2518 if (ret) {
2363 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2519 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
2364 dev_priv->no_aux_handshake = 2520 dev_priv->no_aux_handshake =
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 23c56221fe8f..82a459bfccbc 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,7 @@
110/* drm_display_mode->private_flags */ 110/* drm_display_mode->private_flags */
111#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) 111#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
112#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) 112#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
113#define INTEL_MODE_DP_FORCE_6BPC (0x10)
113 114
114static inline void 115static inline void
115intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, 116intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 42f165a520de..e44191132ac4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
715 DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"), 715 DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
716 }, 716 },
717 }, 717 },
718 {
719 .callback = intel_no_lvds_dmi_callback,
720 .ident = "Asus AT5NM10T-I",
721 .matches = {
722 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
723 DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
724 },
725 },
718 726
719 { } /* terminating entry */ 727 { } /* terminating entry */
720}; 728};
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 499d4c0dbeeb..04d79fd1dc9d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
178 if (HAS_PCH_SPLIT(dev)) { 178 if (HAS_PCH_SPLIT(dev)) {
179 max >>= 16; 179 max >>= 16;
180 } else { 180 } else {
181 if (IS_PINEVIEW(dev)) { 181 if (INTEL_INFO(dev)->gen < 4)
182 max >>= 17; 182 max >>= 17;
183 } else { 183 else
184 max >>= 16; 184 max >>= 16;
185 if (INTEL_INFO(dev)->gen < 4)
186 max &= ~1;
187 }
188 185
189 if (is_backlight_combination_mode(dev)) 186 if (is_backlight_combination_mode(dev))
190 max *= 0xff; 187 max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
203 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 200 val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
204 } else { 201 } else {
205 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 202 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
206 if (IS_PINEVIEW(dev)) 203 if (INTEL_INFO(dev)->gen < 4)
207 val >>= 1; 204 val >>= 1;
208 205
209 if (is_backlight_combination_mode(dev)) { 206 if (is_backlight_combination_mode(dev)) {
210 u8 lbpc; 207 u8 lbpc;
211 208
212 val &= ~1;
213 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); 209 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
214 val *= lbpc; 210 val *= lbpc;
215 } 211 }
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
246 } 242 }
247 243
248 tmp = I915_READ(BLC_PWM_CTL); 244 tmp = I915_READ(BLC_PWM_CTL);
249 if (IS_PINEVIEW(dev)) { 245 if (INTEL_INFO(dev)->gen < 4)
250 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
251 level <<= 1; 246 level <<= 1;
252 } else 247 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
253 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
254 I915_WRITE(BLC_PWM_CTL, tmp | level); 248 I915_WRITE(BLC_PWM_CTL, tmp | level);
255} 249}
256 250
@@ -326,7 +320,8 @@ static int intel_panel_update_status(struct backlight_device *bd)
326static int intel_panel_get_brightness(struct backlight_device *bd) 320static int intel_panel_get_brightness(struct backlight_device *bd)
327{ 321{
328 struct drm_device *dev = bl_get_data(bd); 322 struct drm_device *dev = bl_get_data(bd);
329 return intel_panel_get_backlight(dev); 323 struct drm_i915_private *dev_priv = dev->dev_private;
324 return dev_priv->backlight_level;
330} 325}
331 326
332static const struct backlight_ops intel_panel_bl_ops = { 327static const struct backlight_ops intel_panel_bl_ops = {
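
The intel_panel.c changes above move the "extra low bit" handling from an IS_PINEVIEW() check to all pre-gen4 parts: the maximum and current backlight values shift out one additional bit, and, as before, combination mode scales by the LBPC byte. A small sketch of that arithmetic on plain integers; gen, the combination-mode flag and lbpc are inputs here rather than values read from hardware or PCI config space.

#include <stdio.h>
#include <stdint.h>

/* Non-PCH path of the max-backlight computation shown above. */
static uint32_t max_backlight(uint32_t blc_pwm_ctl, int gen, int combination_mode)
{
	uint32_t max = blc_pwm_ctl;

	max >>= (gen < 4) ? 17 : 16;	/* pre-gen4 keeps one extra low bit */
	if (combination_mode)
		max *= 0xff;		/* LBPC byte extends the range */
	return max;
}

/* Current duty cycle, mirroring intel_panel_get_backlight() above. */
static uint32_t current_backlight(uint32_t duty_cycle, int gen,
				  int combination_mode, uint8_t lbpc)
{
	uint32_t val = duty_cycle;

	if (gen < 4)
		val >>= 1;
	if (combination_mode)
		val *= lbpc;
	return val;
}

int main(void)
{
	uint32_t ctl = 0x12345678;

	printf("max (gen3, combination mode): %u\n",
	       (unsigned)max_backlight(ctl, 3, 1));
	printf("cur (gen3, combination mode, lbpc=200): %u\n",
	       (unsigned)current_backlight(0x1234, 3, 1, 200));
	return 0;
}
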
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3003fb25aefd..f7b9268df266 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -50,6 +50,7 @@
50#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) 50#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
51#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) 51#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
52#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) 52#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
53#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
53 54
54 55
55static const char *tv_format_names[] = { 56static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1086 } 1087 }
1087 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; 1088 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
1088 } 1089 }
1089 if (intel_crtc->pipe == 1) 1090
1090 sdvox |= SDVO_PIPE_B_SELECT; 1091 if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
1092 sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
1093 else
1094 sdvox |= TRANSCODER(intel_crtc->pipe);
1095
1091 if (intel_sdvo->has_hdmi_audio) 1096 if (intel_sdvo->has_hdmi_audio)
1092 sdvox |= SDVO_AUDIO_ENABLE; 1097 sdvox |= SDVO_AUDIO_ENABLE;
1093 1098
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
1314 return status; 1319 return status;
1315} 1320}
1316 1321
1322static bool
1323intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
1324 struct edid *edid)
1325{
1326 bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
1327 bool connector_is_digital = !!IS_DIGITAL(sdvo);
1328
1329 DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
1330 connector_is_digital, monitor_is_digital);
1331 return connector_is_digital == monitor_is_digital;
1332}
1333
1317static enum drm_connector_status 1334static enum drm_connector_status
1318intel_sdvo_detect(struct drm_connector *connector, bool force) 1335intel_sdvo_detect(struct drm_connector *connector, bool force)
1319{ 1336{
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1358 if (edid == NULL) 1375 if (edid == NULL)
1359 edid = intel_sdvo_get_analog_edid(connector); 1376 edid = intel_sdvo_get_analog_edid(connector);
1360 if (edid != NULL) { 1377 if (edid != NULL) {
1361 if (edid->input & DRM_EDID_INPUT_DIGITAL) 1378 if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
1362 ret = connector_status_disconnected; 1379 edid))
1363 else
1364 ret = connector_status_connected; 1380 ret = connector_status_connected;
1381 else
1382 ret = connector_status_disconnected;
1383
1365 connector->display_info.raw_edid = NULL; 1384 connector->display_info.raw_edid = NULL;
1366 kfree(edid); 1385 kfree(edid);
1367 } else 1386 } else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1402 edid = intel_sdvo_get_analog_edid(connector); 1421 edid = intel_sdvo_get_analog_edid(connector);
1403 1422
1404 if (edid != NULL) { 1423 if (edid != NULL) {
1405 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1424 if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
1406 bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); 1425 edid)) {
1407 bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
1408
1409 if (connector_is_digital == monitor_is_digital) {
1410 drm_mode_connector_update_edid_property(connector, edid); 1426 drm_mode_connector_update_edid_property(connector, edid);
1411 drm_add_edid_modes(connector, edid); 1427 drm_add_edid_modes(connector, edid);
1412 } 1428 }
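
intel_sdvo_connector_matches_edid() above reduces the detect and mode-list decisions to a single question: does the connector's digital/analog nature match what the monitor's EDID claims? A trivial sketch of that predicate; the flag values below are made up, only the comparison matters.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define OUT_TMDS            (1u << 0)	/* illustrative output-capability flags */
#define OUT_LVDS            (1u << 1)
#define OUT_RGB             (1u << 2)
#define EDID_INPUT_DIGITAL  (1u << 7)	/* assumed position of the EDID bit */

static bool connector_matches_edid(uint32_t output_flags, uint8_t edid_input)
{
	bool connector_is_digital = !!(output_flags & (OUT_TMDS | OUT_LVDS));
	bool monitor_is_digital   = !!(edid_input & EDID_INPUT_DIGITAL);

	/* Only report "connected" when both sides agree on digital vs analog. */
	return connector_is_digital == monitor_is_digital;
}

int main(void)
{
	printf("TMDS + digital EDID : %d\n",
	       connector_matches_edid(OUT_TMDS, EDID_INPUT_DIGITAL));
	printf("VGA  + digital EDID : %d\n",
	       connector_matches_edid(OUT_RGB, EDID_INPUT_DIGITAL));
	return 0;
}
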
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 032a82098136..5fc201b49d30 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -640,10 +640,9 @@ static int
640nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) 640nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
641{ 641{
642 struct drm_nouveau_private *dev_priv = dev->dev_private; 642 struct drm_nouveau_private *dev_priv = dev->dev_private;
643 uint32_t reg0 = nv_rd32(dev, reg + 0);
644 uint32_t reg1 = nv_rd32(dev, reg + 4);
645 struct nouveau_pll_vals pll; 643 struct nouveau_pll_vals pll;
646 struct pll_lims pll_limits; 644 struct pll_lims pll_limits;
645 u32 ctrl, mask, coef;
647 int ret; 646 int ret;
648 647
649 ret = get_pll_limits(dev, reg, &pll_limits); 648 ret = get_pll_limits(dev, reg, &pll_limits);
@@ -654,15 +653,20 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
654 if (!clk) 653 if (!clk)
655 return -ERANGE; 654 return -ERANGE;
656 655
657 reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16); 656 coef = pll.N1 << 8 | pll.M1;
658 reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1; 657 ctrl = pll.log2P << 16;
659 658 mask = 0x00070000;
660 if (dev_priv->vbios.execute) { 659 if (reg == 0x004008) {
661 still_alive(); 660 mask |= 0x01f80000;
662 nv_wr32(dev, reg + 4, reg1); 661 ctrl |= (pll_limits.log2p_bias << 19);
663 nv_wr32(dev, reg + 0, reg0); 662 ctrl |= (pll.log2P << 22);
664 } 663 }
665 664
665 if (!dev_priv->vbios.execute)
666 return 0;
667
668 nv_mask(dev, reg + 0, mask, ctrl);
669 nv_wr32(dev, reg + 4, coef);
666 return 0; 670 return 0;
667} 671}
668 672
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b1b33a108b31..f12dd0f39211 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -153,7 +153,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
153 153
154 if (dev_priv->card_type == NV_10 && 154 if (dev_priv->card_type == NV_10 &&
155 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && 155 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
156 nvbo->bo.mem.num_pages < vram_pages / 2) { 156 nvbo->bo.mem.num_pages < vram_pages / 4) {
157 /* 157 /*
158 * Make sure that the color and depth buffers are handled 158 * Make sure that the color and depth buffers are handled
159 * by independent memory controller units. Up to a 9x 159 * by independent memory controller units. Up to a 9x
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index a319d5646ea9..bb6ec9ef8676 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -158,6 +158,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
158 INIT_LIST_HEAD(&chan->nvsw.vbl_wait); 158 INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
159 INIT_LIST_HEAD(&chan->nvsw.flip); 159 INIT_LIST_HEAD(&chan->nvsw.flip);
160 INIT_LIST_HEAD(&chan->fence.pending); 160 INIT_LIST_HEAD(&chan->fence.pending);
161 spin_lock_init(&chan->fence.lock);
161 162
162 /* setup channel's memory and vm */ 163 /* setup channel's memory and vm */
163 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); 164 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index e0d275e1c96c..cea6696b1906 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -710,7 +710,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
710 case OUTPUT_DP: 710 case OUTPUT_DP:
711 max_clock = nv_encoder->dp.link_nr; 711 max_clock = nv_encoder->dp.link_nr;
712 max_clock *= nv_encoder->dp.link_bw; 712 max_clock *= nv_encoder->dp.link_bw;
713 clock = clock * nouveau_connector_bpp(connector) / 8; 713 clock = clock * nouveau_connector_bpp(connector) / 10;
714 break; 714 break;
715 default: 715 default:
716 BUG_ON(1); 716 BUG_ON(1);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2531ef54c3e9..7e88cd7f2b99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
369 spin_unlock_irqrestore(&dev->event_lock, flags); 369 spin_unlock_irqrestore(&dev->event_lock, flags);
370 return 0; 370 return 0;
371} 371}
372
373int
374nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
375 struct drm_mode_create_dumb *args)
376{
377 struct nouveau_bo *bo;
378 int ret;
379
380 args->pitch = roundup(args->width * (args->bpp / 8), 256);
381 args->size = args->pitch * args->height;
382 args->size = roundup(args->size, PAGE_SIZE);
383
384 ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
385 if (ret)
386 return ret;
387
388 ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
389 drm_gem_object_unreference_unlocked(bo->gem);
390 return ret;
391}
392
393int
394nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
395 uint32_t handle)
396{
397 return drm_gem_handle_delete(file_priv, handle);
398}
399
400int
401nouveau_display_dumb_map_offset(struct drm_file *file_priv,
402 struct drm_device *dev,
403 uint32_t handle, uint64_t *poffset)
404{
405 struct drm_gem_object *gem;
406
407 gem = drm_gem_object_lookup(dev, file_priv, handle);
408 if (gem) {
409 struct nouveau_bo *bo = gem->driver_private;
410 *poffset = bo->bo.addr_space_offset;
411 drm_gem_object_unreference_unlocked(gem);
412 return 0;
413 }
414
415 return -ENOENT;
416}
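
nouveau_display_dumb_create() above rounds the scanout pitch up to 256 bytes and the total allocation up to a whole page before asking for VRAM. A standalone sketch of just that layout arithmetic; PAGE_SIZE is fixed at 4096 here for illustration, where the kernel would use the architecture's page size.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u		/* assumed for this sketch */

static uint64_t roundup64(uint64_t x, uint64_t to)
{
	return (x + to - 1) / to * to;
}

struct dumb_args {
	uint32_t width, height, bpp;
	uint32_t pitch;
	uint64_t size;
};

/* Mirrors the size computation in nouveau_display_dumb_create() above. */
static void compute_dumb_layout(struct dumb_args *a)
{
	a->pitch = (uint32_t)roundup64((uint64_t)a->width * (a->bpp / 8), 256);
	a->size  = roundup64((uint64_t)a->pitch * a->height, PAGE_SIZE);
}

int main(void)
{
	struct dumb_args a = { .width = 1920, .height = 1080, .bpp = 32 };

	compute_dumb_layout(&a);
	printf("pitch = %u bytes, size = %llu bytes (%llu pages)\n",
	       (unsigned)a.pitch,
	       (unsigned long long)a.size,
	       (unsigned long long)(a.size / PAGE_SIZE));
	return 0;
}
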
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index d661bc5e3945..f0a60afac446 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -434,6 +434,10 @@ static struct drm_driver driver = {
434 .gem_open_object = nouveau_gem_object_open, 434 .gem_open_object = nouveau_gem_object_open,
435 .gem_close_object = nouveau_gem_object_close, 435 .gem_close_object = nouveau_gem_object_close,
436 436
437 .dumb_create = nouveau_display_dumb_create,
438 .dumb_map_offset = nouveau_display_dumb_map_offset,
439 .dumb_destroy = nouveau_display_dumb_destroy,
440
437 .name = DRIVER_NAME, 441 .name = DRIVER_NAME,
438 .desc = DRIVER_DESC, 442 .desc = DRIVER_DESC,
439#ifdef GIT_REVISION 443#ifdef GIT_REVISION
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 0c53e39fc6c9..dfddb7e078a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1421,6 +1421,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1421 struct drm_pending_vblank_event *event); 1421 struct drm_pending_vblank_event *event);
1422int nouveau_finish_page_flip(struct nouveau_channel *, 1422int nouveau_finish_page_flip(struct nouveau_channel *,
1423 struct nouveau_page_flip_state *); 1423 struct nouveau_page_flip_state *);
1424int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
1425 struct drm_mode_create_dumb *args);
1426int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
1427 uint32_t handle, uint64_t *offset);
1428int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
1429 uint32_t handle);
1424 1430
1425/* nv10_gpio.c */ 1431/* nv10_gpio.c */
1426int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); 1432int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index defffd140781..dbb151834121 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -488,6 +488,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
488{ 488{
489 struct drm_nouveau_private *dev_priv = dev->dev_private; 489 struct drm_nouveau_private *dev_priv = dev->dev_private;
490 struct nouveau_fbdev *nfbdev; 490 struct nouveau_fbdev *nfbdev;
491 int preferred_bpp;
491 int ret; 492 int ret;
492 493
493 nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); 494 nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
@@ -506,7 +507,15 @@ int nouveau_fbcon_init(struct drm_device *dev)
506 } 507 }
507 508
508 drm_fb_helper_single_add_all_connectors(&nfbdev->helper); 509 drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
509 drm_fb_helper_initial_config(&nfbdev->helper, 32); 510
511 if (dev_priv->vram_size <= 32 * 1024 * 1024)
512 preferred_bpp = 8;
513 else if (dev_priv->vram_size <= 64 * 1024 * 1024)
514 preferred_bpp = 16;
515 else
516 preferred_bpp = 32;
517
518 drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp);
510 return 0; 519 return 0;
511} 520}
512 521
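
nouveau_fbcon_init() above now picks the console depth from the board's VRAM size instead of always asking for 32 bpp. A one-function sketch of that policy using the same thresholds visible in the hunk.

#include <stdio.h>
#include <stdint.h>

/* Same thresholds as the hunk above: <=32 MiB -> 8 bpp, <=64 MiB -> 16 bpp. */
static int preferred_fbcon_bpp(uint64_t vram_size)
{
	if (vram_size <= 32ull * 1024 * 1024)
		return 8;
	if (vram_size <= 64ull * 1024 * 1024)
		return 16;
	return 32;
}

int main(void)
{
	uint64_t sizes[] = { 16ull << 20, 64ull << 20, 512ull << 20 };

	for (int i = 0; i < 3; i++)
		printf("%llu MiB VRAM -> %d bpp console\n",
		       (unsigned long long)(sizes[i] >> 20),
		       preferred_fbcon_bpp(sizes[i]));
	return 0;
}
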
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 81116cfea275..2f6daae68b9d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -539,8 +539,6 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
539 return ret; 539 return ret;
540 } 540 }
541 541
542 INIT_LIST_HEAD(&chan->fence.pending);
543 spin_lock_init(&chan->fence.lock);
544 atomic_set(&chan->fence.last_sequence_irq, 0); 542 atomic_set(&chan->fence.last_sequence_irq, 0);
545 return 0; 543 return 0;
546} 544}
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index c6143df48b9f..d39b2202b197 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -333,7 +333,7 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
333 333
334 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); 334 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
335 335
336 for (i = 0; info[i].addr; i++) { 336 for (i = 0; i2c && info[i].addr; i++) {
337 if (nouveau_probe_i2c_addr(i2c, info[i].addr) && 337 if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
338 (!match || match(i2c, &info[i]))) { 338 (!match || match(i2c, &info[i]))) {
339 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); 339 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 02222c540aee..960c0ae0c0c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
680 return ret; 680 return ret;
681 } 681 }
682 682
683 ret = drm_mm_init(&chan->ramin_heap, base, size); 683 ret = drm_mm_init(&chan->ramin_heap, base, size - base);
684 if (ret) { 684 if (ret) {
685 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); 685 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
686 nouveau_gpuobj_ref(NULL, &chan->ramin); 686 nouveau_gpuobj_ref(NULL, &chan->ramin);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 9f178aa94162..33d03fbf00df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -239,7 +239,7 @@ nouveau_perf_init(struct drm_device *dev)
239 if(version == 0x15) { 239 if(version == 0x15) {
240 memtimings->timing = 240 memtimings->timing =
241 kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); 241 kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
242 if(!memtimings) { 242 if (!memtimings->timing) {
243 NV_WARN(dev,"Could not allocate memtiming table\n"); 243 NV_WARN(dev,"Could not allocate memtiming table\n");
244 return; 244 return;
245 } 245 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 82478e0998e5..d8831ab42bb9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -579,6 +579,14 @@ nouveau_card_init(struct drm_device *dev)
579 if (ret) 579 if (ret)
580 goto out_display_early; 580 goto out_display_early;
581 581
582 /* workaround an odd issue on nvc1 by disabling the device's
583 * nosnoop capability. hopefully won't cause issues until a
584 * better fix is found - assuming there is one...
585 */
586 if (dev_priv->chipset == 0xc1) {
587 nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
588 }
589
582 nouveau_pm_init(dev); 590 nouveau_pm_init(dev);
583 591
584 ret = engine->vram.init(dev); 592 ret = engine->vram.init(dev);
@@ -1102,12 +1110,13 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
1102 dev_priv->noaccel = !!nouveau_noaccel; 1110 dev_priv->noaccel = !!nouveau_noaccel;
1103 if (nouveau_noaccel == -1) { 1111 if (nouveau_noaccel == -1) {
1104 switch (dev_priv->chipset) { 1112 switch (dev_priv->chipset) {
1105 case 0xc1: /* known broken */ 1113#if 0
1106 case 0xc8: /* never tested */ 1114 case 0xXX: /* known broken */
1107 NV_INFO(dev, "acceleration disabled by default, pass " 1115 NV_INFO(dev, "acceleration disabled by default, pass "
1108 "noaccel=0 to force enable\n"); 1116 "noaccel=0 to force enable\n");
1109 dev_priv->noaccel = true; 1117 dev_priv->noaccel = true;
1110 break; 1118 break;
1119#endif
1111 default: 1120 default:
1112 dev_priv->noaccel = false; 1121 dev_priv->noaccel = false;
1113 break; 1122 break;
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index bbc0b9c7e1f7..e676b0d53478 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -57,12 +57,14 @@ read_pll_2(struct drm_device *dev, u32 reg)
57 int P = (ctrl & 0x00070000) >> 16; 57 int P = (ctrl & 0x00070000) >> 16;
58 u32 ref = 27000, clk = 0; 58 u32 ref = 27000, clk = 0;
59 59
60 if (ctrl & 0x80000000) 60 if ((ctrl & 0x80000000) && M1) {
61 clk = ref * N1 / M1; 61 clk = ref * N1 / M1;
62 62 if ((ctrl & 0x40000100) == 0x40000000) {
63 if (!(ctrl & 0x00000100)) { 63 if (M2)
64 if (ctrl & 0x40000000) 64 clk = clk * N2 / M2;
65 clk = clk * N2 / M2; 65 else
66 clk = 0;
67 }
66 } 68 }
67 69
68 return clk >> P; 70 return clk >> P;
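
read_pll_2() above gains guards so a powered-down PLL (M1 or M2 programmed to zero, or the second stage disabled) reads back as 0 instead of dividing by zero. A sketch of the same computation on plain integers; the control-bit meanings are taken from the hunk and kept as named constants here for readability.

#include <stdio.h>
#include <stdint.h>

#define PLL_ENABLED        0x80000000u	/* bit meanings as used in the hunk above */
#define PLL_STAGE2_ENABLE  0x40000000u
#define PLL_STAGE2_BYPASS  0x00000100u

/*
 * Mirrors the guarded readback: if the PLL is off or a divider is zero,
 * report 0 kHz rather than dividing by zero.
 */
static uint32_t pll_clock_khz(uint32_t ctrl, int N1, int M1, int N2, int M2,
			      int P, uint32_t ref_khz)
{
	uint32_t clk = 0;

	if ((ctrl & PLL_ENABLED) && M1) {
		clk = ref_khz * N1 / M1;
		if ((ctrl & (PLL_STAGE2_ENABLE | PLL_STAGE2_BYPASS)) ==
		    PLL_STAGE2_ENABLE) {
			if (M2)
				clk = clk * N2 / M2;
			else
				clk = 0;
		}
	}
	return clk >> P;
}

int main(void)
{
	/* 27 MHz reference, N1/M1 = 100/3, second stage bypassed, no post-divide. */
	printf("%u kHz\n", (unsigned)pll_clock_khz(PLL_ENABLED | PLL_STAGE2_BYPASS,
						   100, 3, 0, 0, 0, 27000));
	/* Same PLL powered down: no division by zero, just 0. */
	printf("%u kHz\n", (unsigned)pll_clock_khz(0, 100, 0, 0, 0, 0, 27000));
	return 0;
}
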
@@ -177,6 +179,11 @@ nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
177 } 179 }
178 180
179 /* memory clock */ 181 /* memory clock */
182 if (!perflvl->memory) {
183 info->mpll_ctrl = 0x00000000;
184 goto out;
185 }
186
180 ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory, 187 ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
181 &N1, &M1, &N2, &M2, &log2P); 188 &N1, &M1, &N2, &M2, &log2P);
182 if (ret < 0) 189 if (ret < 0)
@@ -264,6 +271,9 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
264 mdelay(5); 271 mdelay(5);
265 nv_mask(dev, 0x00c040, 0x00000333, info->ctrl); 272 nv_mask(dev, 0x00c040, 0x00000333, info->ctrl);
266 273
274 if (!info->mpll_ctrl)
275 goto resume;
276
267 /* wait for vblank start on active crtcs, disable memory access */ 277 /* wait for vblank start on active crtcs, disable memory access */
268 for (i = 0; i < 2; i++) { 278 for (i = 0; i < 2; i++) {
269 if (!(crtc_mask & (1 << i))) 279 if (!(crtc_mask & (1 << i)))
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index d23ca00e7d62..06de250fe617 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
616 struct drm_nouveau_private *dev_priv = dev->dev_private; 616 struct drm_nouveau_private *dev_priv = dev->dev_private;
617 struct nv50_display *disp = nv50_display(dev); 617 struct nv50_display *disp = nv50_display(dev);
618 u32 unk30 = nv_rd32(dev, 0x610030), mc; 618 u32 unk30 = nv_rd32(dev, 0x610030), mc;
619 int i, crtc, or, type = OUTPUT_ANY; 619 int i, crtc, or = 0, type = OUTPUT_ANY;
620 620
621 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); 621 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
622 disp->irq.dcb = NULL; 622 disp->irq.dcb = NULL;
@@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
708 struct nv50_display *disp = nv50_display(dev); 708 struct nv50_display *disp = nv50_display(dev);
709 u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0; 709 u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
710 struct dcb_entry *dcb; 710 struct dcb_entry *dcb;
711 int i, crtc, or, type = OUTPUT_ANY; 711 int i, crtc, or = 0, type = OUTPUT_ANY;
712 712
713 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); 713 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
714 dcb = disp->irq.dcb; 714 dcb = disp->irq.dcb;
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8c979b31ff61..ac601f7c4e1a 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -131,8 +131,8 @@ nv50_graph_init(struct drm_device *dev, int engine)
131 NV_DEBUG(dev, "\n"); 131 NV_DEBUG(dev, "\n");
132 132
133 /* master reset */ 133 /* master reset */
134 nv_mask(dev, 0x000200, 0x00200100, 0x00000000); 134 nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
135 nv_mask(dev, 0x000200, 0x00200100, 0x00200100); 135 nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
136 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ 136 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
137 137
138 /* reset/enable traps and interrupts */ 138 /* reset/enable traps and interrupts */
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index d05c2c3b2444..4b46d6968566 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -601,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
601 gr_def(ctx, offset + 0x1c, 0x00880000); 601 gr_def(ctx, offset + 0x1c, 0x00880000);
602 break; 602 break;
603 case 0x86: 603 case 0x86:
604 gr_def(ctx, offset + 0x1c, 0x008c0000); 604 gr_def(ctx, offset + 0x1c, 0x018c0000);
605 break; 605 break;
606 case 0x92: 606 case 0x92:
607 case 0x96: 607 case 0x96:
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 9da23838e63e..2e45e57fd869 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -160,7 +160,7 @@ nv50_vram_rblock(struct drm_device *dev)
160 colbits = (r4 & 0x0000f000) >> 12; 160 colbits = (r4 & 0x0000f000) >> 12;
161 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; 161 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
162 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; 162 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
163 banks = ((r4 & 0x01000000) ? 8 : 4); 163 banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
164 164
165 rowsize = parts * banks * (1 << colbits) * 8; 165 rowsize = parts * banks * (1 << colbits) * 8;
166 predicted = rowsize << rowbitsa; 166 predicted = rowsize << rowbitsa;
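
The nv50_vram change above decodes the bank count from a two-bit field instead of a single bit, covering 4, 8, 16 and 32 banks. A one-file sketch comparing the old and new decode over all field values; the register name and field position are simply those shown in the hunk.

#include <stdio.h>
#include <stdint.h>

/* Old decode: one bit -> 4 or 8 banks.  New decode: two bits -> 4/8/16/32. */
static unsigned banks_old(uint32_t r4) { return (r4 & 0x01000000) ? 8 : 4; }
static unsigned banks_new(uint32_t r4) { return 1u << (((r4 & 0x03000000) >> 24) + 2); }

int main(void)
{
	for (uint32_t field = 0; field < 4; field++) {
		uint32_t r4 = field << 24;
		printf("field %u: old decode = %u banks, new decode = %u banks\n",
		       (unsigned)field, banks_old(r4), banks_new(r4));
	}
	return 0;
}
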
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index bbdbc51830c8..ecfafd70cf0e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -157,8 +157,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
157 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); 157 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
158 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; 158 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
159 struct drm_device *dev = chan->dev; 159 struct drm_device *dev = chan->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
160 int i = 0, gpc, tp, ret; 161 int i = 0, gpc, tp, ret;
161 u32 magic;
162 162
163 ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM, 163 ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
164 &grch->unk408004); 164 &grch->unk408004);
@@ -207,14 +207,37 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
207 nv_wo32(grch->mmio, i++ * 4, 0x0041880c); 207 nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
208 nv_wo32(grch->mmio, i++ * 4, 0x80000018); 208 nv_wo32(grch->mmio, i++ * 4, 0x80000018);
209 209
210 magic = 0x02180000; 210 if (dev_priv->chipset != 0xc1) {
211 nv_wo32(grch->mmio, i++ * 4, 0x00405830); 211 u32 magic = 0x02180000;
212 nv_wo32(grch->mmio, i++ * 4, magic); 212 nv_wo32(grch->mmio, i++ * 4, 0x00405830);
213 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 213 nv_wo32(grch->mmio, i++ * 4, magic);
214 for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) { 214 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
215 u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); 215 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
216 nv_wo32(grch->mmio, i++ * 4, reg); 216 u32 reg = TP_UNIT(gpc, tp, 0x520);
217 nv_wo32(grch->mmio, i++ * 4, magic); 217 nv_wo32(grch->mmio, i++ * 4, reg);
218 nv_wo32(grch->mmio, i++ * 4, magic);
219 magic += 0x0324;
220 }
221 }
222 } else {
223 u32 magic = 0x02180000;
224 nv_wo32(grch->mmio, i++ * 4, 0x00405830);
225 nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218);
226 nv_wo32(grch->mmio, i++ * 4, 0x004064c4);
227 nv_wo32(grch->mmio, i++ * 4, 0x0086ffff);
228 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
229 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
230 u32 reg = TP_UNIT(gpc, tp, 0x520);
231 nv_wo32(grch->mmio, i++ * 4, reg);
232 nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic);
233 magic += 0x0324;
234 }
235 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
236 u32 reg = TP_UNIT(gpc, tp, 0x544);
237 nv_wo32(grch->mmio, i++ * 4, reg);
238 nv_wo32(grch->mmio, i++ * 4, magic);
239 magic += 0x0324;
240 }
218 } 241 }
219 } 242 }
220 243
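
The open-coded register address 0x504520 + (gpc * 0x8000) + (tp * 0x0800) on the left becomes TP_UNIT(gpc, tp, 0x520) on the right; the macro itself is defined elsewhere in the driver and is not shown here. Purely as an illustration, and assuming the macro keeps the same layout as the expression it replaces, the per-TP addressing works out as:

/* Illustrative reconstruction of the TP register addressing; the base and
 * stride constants below are inferred from the old open-coded form and are
 * assumptions, not the driver's actual TP_UNIT() definition.
 */
#include <stdint.h>
#include <stdio.h>

#define GPC_BASE	0x500000u	/* assumed GPC register window */
#define GPC_STRIDE	0x008000u	/* per-GPC stride, from gpc * 0x8000 */
#define TP_BASE		0x004000u	/* assumed: 0x504520 = 0x500000 + 0x4000 + 0x520 */
#define TP_STRIDE	0x000800u	/* per-TP stride, from tp * 0x0800 */

static uint32_t tp_unit(unsigned gpc, unsigned tp, uint32_t reg)
{
	return GPC_BASE + TP_BASE + gpc * GPC_STRIDE + tp * TP_STRIDE + reg;
}

int main(void)
{
	/* gpc 0 / tp 0 / reg 0x520 reproduces the old constant 0x504520 */
	printf("0x%06x\n", tp_unit(0, 0, 0x520));
	printf("0x%06x\n", tp_unit(1, 2, 0x544));
	return 0;
}
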
@@ -358,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
358 u8 tpnr[GPC_MAX]; 381 u8 tpnr[GPC_MAX];
359 int i, gpc, tpc; 382 int i, gpc, tpc;
360 383
384 nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
385
361 /* 386 /*
362 * TP ROP UNKVAL(magic_not_rop_nr) 387 * TP ROP UNKVAL(magic_not_rop_nr)
363 * 450: 4/0/0/0 2 3 388 * 450: 4/0/0/0 2 3
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index dd0e6a736b3b..96b0b93d94ca 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1812,6 +1812,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1812 /* calculate first set of magics */ 1812 /* calculate first set of magics */
1813 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 1813 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1814 1814
1815 gpc = -1;
1815 for (tp = 0; tp < priv->tp_total; tp++) { 1816 for (tp = 0; tp < priv->tp_total; tp++) {
1816 do { 1817 do {
1817 gpc = (gpc + 1) % priv->gpc_nr; 1818 gpc = (gpc + 1) % priv->gpc_nr;
@@ -1861,30 +1862,26 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1861 1862
1862 if (1) { 1863 if (1) {
1863 u32 tp_mask = 0, tp_set = 0; 1864 u32 tp_mask = 0, tp_set = 0;
1864 u8 tpnr[GPC_MAX]; 1865 u8 tpnr[GPC_MAX], a, b;
1865 1866
1866 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 1867 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1867 for (gpc = 0; gpc < priv->gpc_nr; gpc++) 1868 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
1868 tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8); 1869 tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
1869 1870
1870 gpc = -1; 1871 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
1871 for (i = 0, gpc = -1; i < 32; i++) { 1872 a = (i * (priv->tp_total - 1)) / 32;
1872 int ltp = i * (priv->tp_total - 1) / 32; 1873 if (a != b) {
1873 1874 b = a;
1874 do { 1875 do {
1875 gpc = (gpc + 1) % priv->gpc_nr; 1876 gpc = (gpc + 1) % priv->gpc_nr;
1876 } while (!tpnr[gpc]); 1877 } while (!tpnr[gpc]);
1877 tp = priv->tp_nr[gpc] - tpnr[gpc]--; 1878 tp = priv->tp_nr[gpc] - tpnr[gpc]--;
1878 1879
1879 tp_set |= 1 << ((gpc * 8) + tp); 1880 tp_set |= 1 << ((gpc * 8) + tp);
1881 }
1880 1882
1881 do { 1883 nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
1882 nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); 1884 nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask);
1883 tp_set ^= tp_mask;
1884 nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
1885 tp_set ^= tp_mask;
1886 } while (ltp == (++i * (priv->tp_total - 1) / 32));
1887 i--;
1888 } 1885 }
1889 } 1886 }
1890 1887
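
The rewritten block above distributes the available TPs over 32 register slots (0x406800/0x406c00 plus i * 0x20), advancing to the next TP only when the scaled index i * (tp_total - 1) / 32 takes a new value. A compilable userspace model of that distribution, with the register writes replaced by printf and the GPC/TP counts invented for illustration:

/* Model of the 32-slot TP spread; tp_nr[] is example data, not hardware state. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GPC_MAX 4

int main(void)
{
	uint8_t tp_nr[GPC_MAX] = { 4, 4, 3, 3 };	/* example per-GPC TP counts */
	int gpc_nr = GPC_MAX, tp_total = 14;
	uint8_t tpnr[GPC_MAX];
	uint32_t tp_mask = 0, tp_set = 0;
	int i, gpc, tp, a, b = -1;

	memcpy(tpnr, tp_nr, sizeof(tp_nr));
	for (gpc = 0; gpc < gpc_nr; gpc++)
		tp_mask |= ((1u << tp_nr[gpc]) - 1) << (gpc * 8);

	for (i = 0, gpc = -1; i < 32; i++) {
		a = (i * (tp_total - 1)) / 32;
		if (a != b) {		/* pick the next TP, round-robin across GPCs */
			b = a;
			do {
				gpc = (gpc + 1) % gpc_nr;
			} while (!tpnr[gpc]);
			tp = tp_nr[gpc] - tpnr[gpc]--;
			tp_set |= 1u << ((gpc * 8) + tp);
		}
		printf("0x%06x <- 0x%08x   0x%06x <- 0x%08x\n",
		       0x406800 + i * 0x20, tp_set,
		       0x406c00 + i * 0x20, tp_set ^ tp_mask);
	}
	return 0;
}
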
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index edbfe9360ae2..ce984d573a51 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -43,7 +43,7 @@ static const u8 types[256] = {
43 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
44 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0, 44 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
45 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 45 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
46 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 46 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
47 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3, 47 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
48 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, 48 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
49 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0 49 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
@@ -110,22 +110,26 @@ nvc0_vram_init(struct drm_device *dev)
110 u32 bsize = nv_rd32(dev, 0x10f20c); 110 u32 bsize = nv_rd32(dev, 0x10f20c);
111 u32 offset, length; 111 u32 offset, length;
112 bool uniform = true; 112 bool uniform = true;
113 int ret, i; 113 int ret, part;
114 114
115 NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); 115 NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
116 NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize); 116 NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize);
117 117
118 /* read amount of vram attached to each memory controller */ 118 /* read amount of vram attached to each memory controller */
119 for (i = 0; i < parts; i++) { 119 part = 0;
120 u32 psize = nv_rd32(dev, 0x11020c + (i * 0x1000)); 120 while (parts) {
121 u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000));
122 if (psize == 0)
123 continue;
124 parts--;
125
121 if (psize != bsize) { 126 if (psize != bsize) {
122 if (psize < bsize) 127 if (psize < bsize)
123 bsize = psize; 128 bsize = psize;
124 uniform = false; 129 uniform = false;
125 } 130 }
126 131
127 NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", i, psize); 132 NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
128
129 dev_priv->vram_size += (u64)psize << 20; 133 dev_priv->vram_size += (u64)psize << 20;
130 } 134 }
131 135
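
The loop above no longer assumes the populated partitions occupy the first 'parts' slots: it walks slot by slot, skips slots whose reported size is zero, and stops once the expected number of non-empty partitions has been seen. A standalone sketch of the same scan (the psize table stands in for the 0x11020c + part * 0x1000 reads and is example data only):

/* Partition scan sketch; the table holds exactly 'parts' non-zero entries. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t psize[8] = { 256, 0, 256, 128, 0, 0, 0, 0 };	/* MiB, example */
	uint32_t parts = 3, bsize = 256, part = 0;
	uint64_t vram_size = 0;
	int uniform = 1;

	while (parts) {
		uint32_t size = psize[part++];
		if (size == 0)
			continue;	/* unpopulated slot, keep scanning */
		parts--;

		if (size != bsize) {
			if (size < bsize)
				bsize = size;
			uniform = 0;
		}
		vram_size += (uint64_t)size << 20;
	}
	printf("vram=%llu MiB, uniform=%d, smallest part=%u MiB\n",
	       (unsigned long long)(vram_size >> 20), uniform, bsize);
	return 0;
}
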
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 23d63b4b3d77..cb006a718e70 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
780 continue; 780 continue;
781 781
782 if (nv_partner != nv_encoder && 782 if (nv_partner != nv_encoder &&
783 nv_partner->dcb->or == nv_encoder->or) { 783 nv_partner->dcb->or == nv_encoder->dcb->or) {
784 if (nv_partner->last_dpms == DRM_MODE_DPMS_ON) 784 if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
785 return; 785 return;
786 break; 786 break;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 7567ff2510e0..457bbad3cbf9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1107 return -EINVAL; 1107 return -EINVAL;
1108 } 1108 }
1109 1109
1110 if (tiling_flags & RADEON_TILING_MACRO) 1110 if (tiling_flags & RADEON_TILING_MACRO) {
1111 if (rdev->family >= CHIP_CAYMAN)
1112 tmp = rdev->config.cayman.tile_config;
1113 else
1114 tmp = rdev->config.evergreen.tile_config;
1115
1116 switch ((tmp & 0xf0) >> 4) {
1117 case 0: /* 4 banks */
1118 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
1119 break;
1120 case 1: /* 8 banks */
1121 default:
1122 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
1123 break;
1124 case 2: /* 16 banks */
1125 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
1126 break;
1127 }
1128
1129 switch ((tmp & 0xf000) >> 12) {
1130 case 0: /* 1KB rows */
1131 default:
1132 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
1133 break;
1134 case 1: /* 2KB rows */
1135 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
1136 break;
1137 case 2: /* 4KB rows */
1138 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
1139 break;
1140 }
1141
1111 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); 1142 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
1112 else if (tiling_flags & RADEON_TILING_MICRO) 1143 } else if (tiling_flags & RADEON_TILING_MICRO)
1113 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1144 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1114 1145
1115 switch (radeon_crtc->crtc_id) { 1146 switch (radeon_crtc->crtc_id) {
@@ -1522,12 +1553,6 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
1522 struct drm_display_mode *mode, 1553 struct drm_display_mode *mode,
1523 struct drm_display_mode *adjusted_mode) 1554 struct drm_display_mode *adjusted_mode)
1524{ 1555{
1525 struct drm_device *dev = crtc->dev;
1526 struct radeon_device *rdev = dev->dev_private;
1527
1528 /* adjust pm to upcoming mode change */
1529 radeon_pm_compute_clocks(rdev);
1530
1531 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 1556 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
1532 return false; 1557 return false;
1533 return true; 1558 return true;
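
The block added above derives the scanout tiling parameters from the tile_config word instead of hardcoding them: bits 7:4 select the bank count and bits 15:12 the tile split, with out-of-range values falling back to 8 banks and 1KB rows. A self-contained decode sketch (the tile_config value is made up for illustration):

/* tile_config field decode mirroring the switch statements above */
#include <stdint.h>
#include <stdio.h>

static unsigned decode_num_banks(uint32_t tile_config)
{
	switch ((tile_config & 0xf0) >> 4) {
	case 0:  return 4;
	case 2:  return 16;
	case 1:
	default: return 8;
	}
}

static unsigned decode_tile_split_kb(uint32_t tile_config)
{
	switch ((tile_config & 0xf000) >> 12) {
	case 1:  return 2;
	case 2:  return 4;
	case 0:
	default: return 1;
	}
}

int main(void)
{
	uint32_t tile_config = 0x2010;	/* example value only */

	printf("%u banks, %uKB tile split\n",
	       decode_num_banks(tile_config), decode_tile_split_kb(tile_config));
	return 0;
}
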
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index a0de48542f71..6fb335a4fdda 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -283,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
283 } 283 }
284 } 284 }
285 285
286 DRM_ERROR("aux i2c too many retries, giving up\n"); 286 DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
287 return -EREMOTEIO; 287 return -EREMOTEIO;
288} 288}
289 289
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e4c384b9511c..5e00d1670aa9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
82{ 82{
83 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 83 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
84 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); 84 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
85 int i;
85 86
86 /* Lock the graphics update lock */ 87 /* Lock the graphics update lock */
87 tmp |= EVERGREEN_GRPH_UPDATE_LOCK; 88 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
99 (u32)crtc_base); 100 (u32)crtc_base);
100 101
101 /* Wait for update_pending to go high. */ 102 /* Wait for update_pending to go high. */
102 while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)); 103 for (i = 0; i < rdev->usec_timeout; i++) {
104 if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
105 break;
106 udelay(1);
107 }
103 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 108 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
104 109
105 /* Unlock the lock, so double-buffering can take place inside vblank */ 110 /* Unlock the lock, so double-buffering can take place inside vblank */
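
The unbounded while() poll on the left is replaced by a bounded loop that gives up after rdev->usec_timeout iterations instead of hanging the CPU if the pending bit never rises. A minimal userspace model of the pattern (read_flag() and the iteration at which it succeeds are stand-ins for the RREG32 poll):

/* Bounded register-poll pattern; usleep(1) stands in for udelay(1). */
#include <stdio.h>
#include <unistd.h>

static int read_flag(int attempt)
{
	return attempt >= 5;	/* pretend the bit rises after 5 polls */
}

int main(void)
{
	const int usec_timeout = 100000;
	int i;

	for (i = 0; i < usec_timeout; i++) {
		if (read_flag(i))
			break;
		usleep(1);
	}
	if (i == usec_timeout)
		printf("timed out waiting for update_pending\n");
	else
		printf("update_pending high after %d polls\n", i);
	return 0;
}
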
@@ -157,6 +162,57 @@ int sumo_get_temp(struct radeon_device *rdev)
157 return actual_temp * 1000; 162 return actual_temp * 1000;
158} 163}
159 164
165void sumo_pm_init_profile(struct radeon_device *rdev)
166{
167 int idx;
168
169 /* default */
170 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
171 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
172 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
173 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
174
175 /* low,mid sh/mh */
176 if (rdev->flags & RADEON_IS_MOBILITY)
177 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
178 else
179 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
180
181 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
182 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
183 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
184 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
185
186 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
187 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
188 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
189 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
190
191 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
192 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
193 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
194 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
195
196 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
197 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
198 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
199 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
200
201 /* high sh/mh */
202 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
203 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
204 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
205 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
206 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
207 rdev->pm.power_state[idx].num_clock_modes - 1;
208
209 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
210 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
211 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
212 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
213 rdev->pm.power_state[idx].num_clock_modes - 1;
214}
215
160void evergreen_pm_misc(struct radeon_device *rdev) 216void evergreen_pm_misc(struct radeon_device *rdev)
161{ 217{
162 int req_ps_idx = rdev->pm.requested_power_state_index; 218 int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -1219,7 +1275,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
1219 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 1275 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1220 rdev->mc.vram_end >> 12); 1276 rdev->mc.vram_end >> 12);
1221 } 1277 }
1222 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 1278 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
1223 if (rdev->flags & RADEON_IS_IGP) { 1279 if (rdev->flags & RADEON_IS_IGP) {
1224 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; 1280 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
1225 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; 1281 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 7fdfa8ea7570..cd4590aae154 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
38 u32 group_size; 38 u32 group_size;
39 u32 nbanks; 39 u32 nbanks;
40 u32 npipes; 40 u32 npipes;
41 u32 row_size;
41 /* value we track */ 42 /* value we track */
42 u32 nsamples; 43 u32 nsamples;
43 u32 cb_color_base_last[12]; 44 u32 cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
77 struct radeon_bo *db_s_write_bo; 78 struct radeon_bo *db_s_write_bo;
78}; 79};
79 80
81static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
82{
83 if (tiling_flags & RADEON_TILING_MACRO)
84 return ARRAY_2D_TILED_THIN1;
85 else if (tiling_flags & RADEON_TILING_MICRO)
86 return ARRAY_1D_TILED_THIN1;
87 else
88 return ARRAY_LINEAR_GENERAL;
89}
90
91static u32 evergreen_cs_get_num_banks(u32 nbanks)
92{
93 switch (nbanks) {
94 case 2:
95 return ADDR_SURF_2_BANK;
96 case 4:
97 return ADDR_SURF_4_BANK;
98 case 8:
99 default:
100 return ADDR_SURF_8_BANK;
101 case 16:
102 return ADDR_SURF_16_BANK;
103 }
104}
105
106static u32 evergreen_cs_get_tile_split(u32 row_size)
107{
108 switch (row_size) {
109 case 1:
110 default:
111 return ADDR_SURF_TILE_SPLIT_1KB;
112 case 2:
113 return ADDR_SURF_TILE_SPLIT_2KB;
114 case 4:
115 return ADDR_SURF_TILE_SPLIT_4KB;
116 }
117}
118
80static void evergreen_cs_track_init(struct evergreen_cs_track *track) 119static void evergreen_cs_track_init(struct evergreen_cs_track *track)
81{ 120{
82 int i; 121 int i;
@@ -480,21 +519,22 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
480 } 519 }
481 break; 520 break;
482 case DB_Z_INFO: 521 case DB_Z_INFO:
483 r = evergreen_cs_packet_next_reloc(p, &reloc);
484 if (r) {
485 dev_warn(p->dev, "bad SET_CONTEXT_REG "
486 "0x%04X\n", reg);
487 return -EINVAL;
488 }
489 track->db_z_info = radeon_get_ib_value(p, idx); 522 track->db_z_info = radeon_get_ib_value(p, idx);
490 ib[idx] &= ~Z_ARRAY_MODE(0xf); 523 if (!p->keep_tiling_flags) {
491 track->db_z_info &= ~Z_ARRAY_MODE(0xf); 524 r = evergreen_cs_packet_next_reloc(p, &reloc);
492 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 525 if (r) {
493 ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 526 dev_warn(p->dev, "bad SET_CONTEXT_REG "
494 track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 527 "0x%04X\n", reg);
495 } else { 528 return -EINVAL;
496 ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 529 }
497 track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 530 ib[idx] &= ~Z_ARRAY_MODE(0xf);
531 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
532 ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
533 track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
534 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
535 ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
536 ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
537 }
498 } 538 }
499 break; 539 break;
500 case DB_STENCIL_INFO: 540 case DB_STENCIL_INFO:
@@ -607,40 +647,34 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
607 case CB_COLOR5_INFO: 647 case CB_COLOR5_INFO:
608 case CB_COLOR6_INFO: 648 case CB_COLOR6_INFO:
609 case CB_COLOR7_INFO: 649 case CB_COLOR7_INFO:
610 r = evergreen_cs_packet_next_reloc(p, &reloc);
611 if (r) {
612 dev_warn(p->dev, "bad SET_CONTEXT_REG "
613 "0x%04X\n", reg);
614 return -EINVAL;
615 }
616 tmp = (reg - CB_COLOR0_INFO) / 0x3c; 650 tmp = (reg - CB_COLOR0_INFO) / 0x3c;
617 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 651 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
618 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 652 if (!p->keep_tiling_flags) {
619 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 653 r = evergreen_cs_packet_next_reloc(p, &reloc);
620 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 654 if (r) {
621 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 655 dev_warn(p->dev, "bad SET_CONTEXT_REG "
622 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 656 "0x%04X\n", reg);
623 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 657 return -EINVAL;
658 }
659 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
660 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
624 } 661 }
625 break; 662 break;
626 case CB_COLOR8_INFO: 663 case CB_COLOR8_INFO:
627 case CB_COLOR9_INFO: 664 case CB_COLOR9_INFO:
628 case CB_COLOR10_INFO: 665 case CB_COLOR10_INFO:
629 case CB_COLOR11_INFO: 666 case CB_COLOR11_INFO:
630 r = evergreen_cs_packet_next_reloc(p, &reloc);
631 if (r) {
632 dev_warn(p->dev, "bad SET_CONTEXT_REG "
633 "0x%04X\n", reg);
634 return -EINVAL;
635 }
636 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; 667 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
637 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 668 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
638 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 669 if (!p->keep_tiling_flags) {
639 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 670 r = evergreen_cs_packet_next_reloc(p, &reloc);
640 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 671 if (r) {
641 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 672 dev_warn(p->dev, "bad SET_CONTEXT_REG "
642 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 673 "0x%04X\n", reg);
643 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 674 return -EINVAL;
675 }
676 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
677 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
644 } 678 }
645 break; 679 break;
646 case CB_COLOR0_PITCH: 680 case CB_COLOR0_PITCH:
@@ -695,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
695 case CB_COLOR9_ATTRIB: 729 case CB_COLOR9_ATTRIB:
696 case CB_COLOR10_ATTRIB: 730 case CB_COLOR10_ATTRIB:
697 case CB_COLOR11_ATTRIB: 731 case CB_COLOR11_ATTRIB:
732 r = evergreen_cs_packet_next_reloc(p, &reloc);
733 if (r) {
734 dev_warn(p->dev, "bad SET_CONTEXT_REG "
735 "0x%04X\n", reg);
736 return -EINVAL;
737 }
738 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
739 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
740 ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
741 }
698 break; 742 break;
699 case CB_COLOR0_DIM: 743 case CB_COLOR0_DIM:
700 case CB_COLOR1_DIM: 744 case CB_COLOR1_DIM:
@@ -1311,10 +1355,16 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
1311 return -EINVAL; 1355 return -EINVAL;
1312 } 1356 }
1313 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1357 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1314 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1358 if (!p->keep_tiling_flags) {
1315 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 1359 ib[idx+1+(i*8)+1] |=
1316 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1360 TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1317 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 1361 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1362 ib[idx+1+(i*8)+6] |=
1363 TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
1364 ib[idx+1+(i*8)+7] |=
1365 TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1366 }
1367 }
1318 texture = reloc->robj; 1368 texture = reloc->robj;
1319 /* tex mip base */ 1369 /* tex mip base */
1320 r = evergreen_cs_packet_next_reloc(p, &reloc); 1370 r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -1414,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
1414{ 1464{
1415 struct radeon_cs_packet pkt; 1465 struct radeon_cs_packet pkt;
1416 struct evergreen_cs_track *track; 1466 struct evergreen_cs_track *track;
1467 u32 tmp;
1417 int r; 1468 int r;
1418 1469
1419 if (p->track == NULL) { 1470 if (p->track == NULL) {
@@ -1422,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
1422 if (track == NULL) 1473 if (track == NULL)
1423 return -ENOMEM; 1474 return -ENOMEM;
1424 evergreen_cs_track_init(track); 1475 evergreen_cs_track_init(track);
1425 track->npipes = p->rdev->config.evergreen.tiling_npipes; 1476 if (p->rdev->family >= CHIP_CAYMAN)
1426 track->nbanks = p->rdev->config.evergreen.tiling_nbanks; 1477 tmp = p->rdev->config.cayman.tile_config;
1427 track->group_size = p->rdev->config.evergreen.tiling_group_size; 1478 else
1479 tmp = p->rdev->config.evergreen.tile_config;
1480
1481 switch (tmp & 0xf) {
1482 case 0:
1483 track->npipes = 1;
1484 break;
1485 case 1:
1486 default:
1487 track->npipes = 2;
1488 break;
1489 case 2:
1490 track->npipes = 4;
1491 break;
1492 case 3:
1493 track->npipes = 8;
1494 break;
1495 }
1496
1497 switch ((tmp & 0xf0) >> 4) {
1498 case 0:
1499 track->nbanks = 4;
1500 break;
1501 case 1:
1502 default:
1503 track->nbanks = 8;
1504 break;
1505 case 2:
1506 track->nbanks = 16;
1507 break;
1508 }
1509
1510 switch ((tmp & 0xf00) >> 8) {
1511 case 0:
1512 track->group_size = 256;
1513 break;
1514 case 1:
1515 default:
1516 track->group_size = 512;
1517 break;
1518 }
1519
1520 switch ((tmp & 0xf000) >> 12) {
1521 case 0:
1522 track->row_size = 1;
1523 break;
1524 case 1:
1525 default:
1526 track->row_size = 2;
1527 break;
1528 case 2:
1529 track->row_size = 4;
1530 break;
1531 }
1532
1428 p->track = track; 1533 p->track = track;
1429 } 1534 }
1430 do { 1535 do {
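
The parser setup above now reads its tiling geometry from the tile_config word rather than from precomputed fields: bits 3:0 give the pipe count, bits 7:4 the bank count, bits 11:8 the group size and bits 15:12 the row size. The bank and row-size fields follow the same shape as the decode sketched earlier; the two remaining fields, with an invented example value:

/* Pipe count and group size from tile_config, mirroring the switches above. */
#include <stdint.h>
#include <stdio.h>

static unsigned decode_npipes(uint32_t tile_config)
{
	switch (tile_config & 0xf) {
	case 0:  return 1;
	case 2:  return 4;
	case 3:  return 8;
	case 1:
	default: return 2;
	}
}

static unsigned decode_group_size(uint32_t tile_config)
{
	return ((tile_config & 0xf00) >> 8) == 0 ? 256 : 512;
}

int main(void)
{
	uint32_t tile_config = 0x132;	/* example value only */

	printf("%u pipes, %u-byte groups\n",
	       decode_npipes(tile_config), decode_group_size(tile_config));
	return 0;
}
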
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index c781c92c3451..7d7f2155e34c 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -42,6 +42,17 @@
42# define EVERGREEN_GRPH_DEPTH_8BPP 0 42# define EVERGREEN_GRPH_DEPTH_8BPP 0
43# define EVERGREEN_GRPH_DEPTH_16BPP 1 43# define EVERGREEN_GRPH_DEPTH_16BPP 1
44# define EVERGREEN_GRPH_DEPTH_32BPP 2 44# define EVERGREEN_GRPH_DEPTH_32BPP 2
45# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
46# define EVERGREEN_ADDR_SURF_2_BANK 0
47# define EVERGREEN_ADDR_SURF_4_BANK 1
48# define EVERGREEN_ADDR_SURF_8_BANK 2
49# define EVERGREEN_ADDR_SURF_16_BANK 3
50# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
51# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
52# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
53# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
54# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
55# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
45# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8) 56# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
46/* 8 BPP */ 57/* 8 BPP */
47# define EVERGREEN_GRPH_FORMAT_INDEXED 0 58# define EVERGREEN_GRPH_FORMAT_INDEXED 0
@@ -61,6 +72,24 @@
61# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5 72# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
62# define EVERGREEN_GRPH_FORMAT_RGB111110 6 73# define EVERGREEN_GRPH_FORMAT_RGB111110 6
63# define EVERGREEN_GRPH_FORMAT_BGR101111 7 74# define EVERGREEN_GRPH_FORMAT_BGR101111 7
75# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
76# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
77# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
78# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
79# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
80# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
81# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
82# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
83# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
84# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
85# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
86# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
87# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
88# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
89# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
90# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
91# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
92# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
64# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20) 93# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
65# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0 94# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
66# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1 95# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b937c49054d9..e00039e59a75 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -899,6 +899,10 @@
899#define DB_HTILE_DATA_BASE 0x28014 899#define DB_HTILE_DATA_BASE 0x28014
900#define DB_Z_INFO 0x28040 900#define DB_Z_INFO 0x28040
901# define Z_ARRAY_MODE(x) ((x) << 4) 901# define Z_ARRAY_MODE(x) ((x) << 4)
902# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
903# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
904# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
905# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
902#define DB_STENCIL_INFO 0x28044 906#define DB_STENCIL_INFO 0x28044
903#define DB_Z_READ_BASE 0x28048 907#define DB_Z_READ_BASE 0x28048
904#define DB_STENCIL_READ_BASE 0x2804c 908#define DB_STENCIL_READ_BASE 0x2804c
@@ -951,6 +955,29 @@
951# define CB_SF_EXPORT_FULL 0 955# define CB_SF_EXPORT_FULL 0
952# define CB_SF_EXPORT_NORM 1 956# define CB_SF_EXPORT_NORM 1
953#define CB_COLOR0_ATTRIB 0x28c74 957#define CB_COLOR0_ATTRIB 0x28c74
958# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
959# define ADDR_SURF_TILE_SPLIT_64B 0
960# define ADDR_SURF_TILE_SPLIT_128B 1
961# define ADDR_SURF_TILE_SPLIT_256B 2
962# define ADDR_SURF_TILE_SPLIT_512B 3
963# define ADDR_SURF_TILE_SPLIT_1KB 4
964# define ADDR_SURF_TILE_SPLIT_2KB 5
965# define ADDR_SURF_TILE_SPLIT_4KB 6
966# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
967# define ADDR_SURF_2_BANK 0
968# define ADDR_SURF_4_BANK 1
969# define ADDR_SURF_8_BANK 2
970# define ADDR_SURF_16_BANK 3
971# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
972# define ADDR_SURF_BANK_WIDTH_1 0
973# define ADDR_SURF_BANK_WIDTH_2 1
974# define ADDR_SURF_BANK_WIDTH_4 2
975# define ADDR_SURF_BANK_WIDTH_8 3
976# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
977# define ADDR_SURF_BANK_HEIGHT_1 0
978# define ADDR_SURF_BANK_HEIGHT_2 1
979# define ADDR_SURF_BANK_HEIGHT_4 2
980# define ADDR_SURF_BANK_HEIGHT_8 3
954#define CB_COLOR0_DIM 0x28c78 981#define CB_COLOR0_DIM 0x28c78
955/* only CB0-7 blocks have these regs */ 982/* only CB0-7 blocks have these regs */
956#define CB_COLOR0_CMASK 0x28c7c 983#define CB_COLOR0_CMASK 0x28c7c
@@ -1137,7 +1164,11 @@
1137# define SQ_SEL_1 5 1164# define SQ_SEL_1 5
1138#define SQ_TEX_RESOURCE_WORD5_0 0x30014 1165#define SQ_TEX_RESOURCE_WORD5_0 0x30014
1139#define SQ_TEX_RESOURCE_WORD6_0 0x30018 1166#define SQ_TEX_RESOURCE_WORD6_0 0x30018
1167# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
1140#define SQ_TEX_RESOURCE_WORD7_0 0x3001c 1168#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
1169# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
1170# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
1171# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
1141 1172
1142#define SQ_VTX_CONSTANT_WORD0_0 0x30000 1173#define SQ_VTX_CONSTANT_WORD0_0 0x30000
1143#define SQ_VTX_CONSTANT_WORD1_0 0x30004 1174#define SQ_VTX_CONSTANT_WORD1_0 0x30004
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ad158ea49901..bfc08f6320f8 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
187{ 187{
188 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 188 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
189 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; 189 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
190 int i;
190 191
191 /* Lock the graphics update lock */ 192 /* Lock the graphics update lock */
192 /* update the scanout addresses */ 193 /* update the scanout addresses */
193 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); 194 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
194 195
195 /* Wait for update_pending to go high. */ 196 /* Wait for update_pending to go high. */
196 while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)); 197 for (i = 0; i < rdev->usec_timeout; i++) {
198 if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
199 break;
200 udelay(1);
201 }
197 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 202 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
198 203
199 /* Unlock the lock, so double-buffering can take place inside vblank */ 204 /* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 400b26df652a..c93bc64707e1 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -701,16 +701,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
701 return r; 701 return r;
702 } 702 }
703 703
704 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 704 if (p->keep_tiling_flags) {
705 tile_flags |= R300_TXO_MACRO_TILE; 705 ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
706 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 706 ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
707 tile_flags |= R300_TXO_MICRO_TILE; 707 } else {
708 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 708 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
709 tile_flags |= R300_TXO_MICRO_TILE_SQUARE; 709 tile_flags |= R300_TXO_MACRO_TILE;
710 710 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
711 tmp = idx_value + ((u32)reloc->lobj.gpu_offset); 711 tile_flags |= R300_TXO_MICRO_TILE;
712 tmp |= tile_flags; 712 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
713 ib[idx] = tmp; 713 tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
714
715 tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
716 tmp |= tile_flags;
717 ib[idx] = tmp;
718 }
714 track->textures[i].robj = reloc->robj; 719 track->textures[i].robj = reloc->robj;
715 track->tex_dirty = true; 720 track->tex_dirty = true;
716 break; 721 break;
@@ -760,24 +765,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
760 /* RB3D_COLORPITCH1 */ 765 /* RB3D_COLORPITCH1 */
761 /* RB3D_COLORPITCH2 */ 766 /* RB3D_COLORPITCH2 */
762 /* RB3D_COLORPITCH3 */ 767 /* RB3D_COLORPITCH3 */
763 r = r100_cs_packet_next_reloc(p, &reloc); 768 if (!p->keep_tiling_flags) {
764 if (r) { 769 r = r100_cs_packet_next_reloc(p, &reloc);
765 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 770 if (r) {
766 idx, reg); 771 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
767 r100_cs_dump_packet(p, pkt); 772 idx, reg);
768 return r; 773 r100_cs_dump_packet(p, pkt);
769 } 774 return r;
775 }
770 776
771 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 777 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
772 tile_flags |= R300_COLOR_TILE_ENABLE; 778 tile_flags |= R300_COLOR_TILE_ENABLE;
773 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 779 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
774 tile_flags |= R300_COLOR_MICROTILE_ENABLE; 780 tile_flags |= R300_COLOR_MICROTILE_ENABLE;
775 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 781 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
776 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; 782 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
777 783
778 tmp = idx_value & ~(0x7 << 16); 784 tmp = idx_value & ~(0x7 << 16);
779 tmp |= tile_flags; 785 tmp |= tile_flags;
780 ib[idx] = tmp; 786 ib[idx] = tmp;
787 }
781 i = (reg - 0x4E38) >> 2; 788 i = (reg - 0x4E38) >> 2;
782 track->cb[i].pitch = idx_value & 0x3FFE; 789 track->cb[i].pitch = idx_value & 0x3FFE;
783 switch (((idx_value >> 21) & 0xF)) { 790 switch (((idx_value >> 21) & 0xF)) {
@@ -843,25 +850,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
843 break; 850 break;
844 case 0x4F24: 851 case 0x4F24:
845 /* ZB_DEPTHPITCH */ 852 /* ZB_DEPTHPITCH */
846 r = r100_cs_packet_next_reloc(p, &reloc); 853 if (!p->keep_tiling_flags) {
847 if (r) { 854 r = r100_cs_packet_next_reloc(p, &reloc);
848 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 855 if (r) {
849 idx, reg); 856 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
850 r100_cs_dump_packet(p, pkt); 857 idx, reg);
851 return r; 858 r100_cs_dump_packet(p, pkt);
852 } 859 return r;
853 860 }
854 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
855 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
856 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
857 tile_flags |= R300_DEPTHMICROTILE_TILED;
858 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
859 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
860 861
861 tmp = idx_value & ~(0x7 << 16); 862 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
862 tmp |= tile_flags; 863 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
863 ib[idx] = tmp; 864 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
865 tile_flags |= R300_DEPTHMICROTILE_TILED;
866 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
867 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
864 868
869 tmp = idx_value & ~(0x7 << 16);
870 tmp |= tile_flags;
871 ib[idx] = tmp;
872 }
865 track->zb.pitch = idx_value & 0x3FFC; 873 track->zb.pitch = idx_value & 0x3FFC;
866 track->zb_dirty = true; 874 track->zb_dirty = true;
867 break; 875 break;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 19afc43ad173..9cdda0b3b081 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -288,24 +288,6 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
288 pcie_lanes); 288 pcie_lanes);
289} 289}
290 290
291static int r600_pm_get_type_index(struct radeon_device *rdev,
292 enum radeon_pm_state_type ps_type,
293 int instance)
294{
295 int i;
296 int found_instance = -1;
297
298 for (i = 0; i < rdev->pm.num_power_states; i++) {
299 if (rdev->pm.power_state[i].type == ps_type) {
300 found_instance++;
301 if (found_instance == instance)
302 return i;
303 }
304 }
305 /* return default if no match */
306 return rdev->pm.default_power_state_index;
307}
308
309void rs780_pm_init_profile(struct radeon_device *rdev) 291void rs780_pm_init_profile(struct radeon_device *rdev)
310{ 292{
311 if (rdev->pm.num_power_states == 2) { 293 if (rdev->pm.num_power_states == 2) {
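
The static helper removed above survives as the shared radeon_pm_get_type_index() (its declaration is added to radeon.h further down): it returns the index of the Nth power state of a requested type and falls back to the default state when no match exists. A standalone sketch of that lookup with made-up state data:

/* Nth-state-of-type lookup, same logic as the removed r600 helper. */
#include <stdio.h>

enum ps_type { PS_BATTERY, PS_PERFORMANCE, PS_DEFAULT };

static int get_type_index(const enum ps_type *states, int n, int def_idx,
			  enum ps_type type, int instance)
{
	int i, found = -1;

	for (i = 0; i < n; i++) {
		if (states[i] == type && ++found == instance)
			return i;
	}
	return def_idx;		/* no match: fall back to the default state */
}

int main(void)
{
	enum ps_type states[] = { PS_DEFAULT, PS_BATTERY, PS_PERFORMANCE,
				  PS_BATTERY, PS_PERFORMANCE };

	printf("%d\n", get_type_index(states, 5, 0, PS_BATTERY, 1));		/* 3 */
	printf("%d\n", get_type_index(states, 5, 0, PS_PERFORMANCE, 2));	/* 0, fallback */
	return 0;
}
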
@@ -421,6 +403,8 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
421 403
422void r600_pm_init_profile(struct radeon_device *rdev) 404void r600_pm_init_profile(struct radeon_device *rdev)
423{ 405{
406 int idx;
407
424 if (rdev->family == CHIP_R600) { 408 if (rdev->family == CHIP_R600) {
425 /* XXX */ 409 /* XXX */
426 /* default */ 410 /* default */
@@ -502,81 +486,43 @@ void r600_pm_init_profile(struct radeon_device *rdev)
502 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 486 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
503 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; 487 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
504 /* low sh */ 488 /* low sh */
505 if (rdev->flags & RADEON_IS_MOBILITY) { 489 if (rdev->flags & RADEON_IS_MOBILITY)
506 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 490 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
507 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 491 else
508 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 492 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
509 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 493 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
510 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 494 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
511 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 495 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
512 } else { 496 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
513 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
514 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
515 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
516 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
517 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
518 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
519 }
520 /* mid sh */ 497 /* mid sh */
521 if (rdev->flags & RADEON_IS_MOBILITY) { 498 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
522 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 499 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
523 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 500 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
524 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 501 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
525 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
526 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
527 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
528 } else {
529 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
530 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
531 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
532 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
533 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
534 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
535 }
536 /* high sh */ 502 /* high sh */
537 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 503 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
538 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 504 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
539 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 505 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
540 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
541 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 506 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
542 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; 507 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
543 /* low mh */ 508 /* low mh */
544 if (rdev->flags & RADEON_IS_MOBILITY) { 509 if (rdev->flags & RADEON_IS_MOBILITY)
545 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 510 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
546 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 511 else
547 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 512 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
548 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 513 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
549 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 514 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
550 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 515 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
551 } else { 516 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
552 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
553 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
554 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
555 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
556 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
557 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
558 }
559 /* mid mh */ 517 /* mid mh */
560 if (rdev->flags & RADEON_IS_MOBILITY) { 518 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
561 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 519 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
562 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 520 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
563 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 521 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
564 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
565 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
566 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
567 } else {
568 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
569 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
570 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
571 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
572 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
573 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
574 }
575 /* high mh */ 522 /* high mh */
576 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 523 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
577 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); 524 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
578 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 525 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
579 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
580 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 526 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
581 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; 527 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
582 } 528 }
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0a2e023c1557..cb1acffd2430 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -941,7 +941,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
941 track->db_depth_control = radeon_get_ib_value(p, idx); 941 track->db_depth_control = radeon_get_ib_value(p, idx);
942 break; 942 break;
943 case R_028010_DB_DEPTH_INFO: 943 case R_028010_DB_DEPTH_INFO:
944 if (r600_cs_packet_next_is_pkt3_nop(p)) { 944 if (!p->keep_tiling_flags &&
945 r600_cs_packet_next_is_pkt3_nop(p)) {
945 r = r600_cs_packet_next_reloc(p, &reloc); 946 r = r600_cs_packet_next_reloc(p, &reloc);
946 if (r) { 947 if (r) {
947 dev_warn(p->dev, "bad SET_CONTEXT_REG " 948 dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -992,7 +993,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
992 case R_0280B4_CB_COLOR5_INFO: 993 case R_0280B4_CB_COLOR5_INFO:
993 case R_0280B8_CB_COLOR6_INFO: 994 case R_0280B8_CB_COLOR6_INFO:
994 case R_0280BC_CB_COLOR7_INFO: 995 case R_0280BC_CB_COLOR7_INFO:
995 if (r600_cs_packet_next_is_pkt3_nop(p)) { 996 if (!p->keep_tiling_flags &&
997 r600_cs_packet_next_is_pkt3_nop(p)) {
996 r = r600_cs_packet_next_reloc(p, &reloc); 998 r = r600_cs_packet_next_reloc(p, &reloc);
997 if (r) { 999 if (r) {
998 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1000 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
@@ -1291,10 +1293,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1291 mip_offset <<= 8; 1293 mip_offset <<= 8;
1292 1294
1293 word0 = radeon_get_ib_value(p, idx + 0); 1295 word0 = radeon_get_ib_value(p, idx + 0);
1294 if (tiling_flags & RADEON_TILING_MACRO) 1296 if (!p->keep_tiling_flags) {
1295 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1297 if (tiling_flags & RADEON_TILING_MACRO)
1296 else if (tiling_flags & RADEON_TILING_MICRO) 1298 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1297 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1299 else if (tiling_flags & RADEON_TILING_MICRO)
1300 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1301 }
1298 word1 = radeon_get_ib_value(p, idx + 1); 1302 word1 = radeon_get_ib_value(p, idx + 1);
1299 w0 = G_038000_TEX_WIDTH(word0) + 1; 1303 w0 = G_038000_TEX_WIDTH(word0) + 1;
1300 h0 = G_038004_TEX_HEIGHT(word1) + 1; 1304 h0 = G_038004_TEX_HEIGHT(word1) + 1;
@@ -1621,10 +1625,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1621 return -EINVAL; 1625 return -EINVAL;
1622 } 1626 }
1623 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1627 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1624 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1628 if (!p->keep_tiling_flags) {
1625 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1629 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1626 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1630 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1627 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1631 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1632 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1633 }
1628 texture = reloc->robj; 1634 texture = reloc->robj;
1629 /* tex mip base */ 1635 /* tex mip base */
1630 r = r600_cs_packet_next_reloc(p, &reloc); 1636 r = r600_cs_packet_next_reloc(p, &reloc);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 83b2e016a4a1..c8f4dbd2d17c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -610,7 +610,8 @@ struct radeon_cs_parser {
610 struct radeon_ib *ib; 610 struct radeon_ib *ib;
611 void *track; 611 void *track;
612 unsigned family; 612 unsigned family;
613 int parser_error; 613 int parser_error;
614 bool keep_tiling_flags;
614}; 615};
615 616
616extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); 617extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
@@ -783,8 +784,7 @@ struct radeon_pm_clock_info {
783 784
784struct radeon_power_state { 785struct radeon_power_state {
785 enum radeon_pm_state_type type; 786 enum radeon_pm_state_type type;
786 /* XXX: use a define for num clock modes */ 787 struct radeon_pm_clock_info *clock_info;
787 struct radeon_pm_clock_info clock_info[8];
788 /* number of valid clock modes in this power state */ 788 /* number of valid clock modes in this power state */
789 int num_clock_modes; 789 int num_clock_modes;
790 struct radeon_pm_clock_info *default_clock_mode; 790 struct radeon_pm_clock_info *default_clock_mode;
@@ -854,6 +854,9 @@ struct radeon_pm {
854 struct device *int_hwmon_dev; 854 struct device *int_hwmon_dev;
855}; 855};
856 856
857int radeon_pm_get_type_index(struct radeon_device *rdev,
858 enum radeon_pm_state_type ps_type,
859 int instance);
857 860
858/* 861/*
859 * Benchmarking 862 * Benchmarking
@@ -1141,6 +1144,48 @@ struct r600_vram_scratch {
1141 u64 gpu_addr; 1144 u64 gpu_addr;
1142}; 1145};
1143 1146
1147
1148/*
1149 * Mutex which allows recursive locking from the same process.
1150 */
1151struct radeon_mutex {
1152 struct mutex mutex;
1153 struct task_struct *owner;
1154 int level;
1155};
1156
1157static inline void radeon_mutex_init(struct radeon_mutex *mutex)
1158{
1159 mutex_init(&mutex->mutex);
1160 mutex->owner = NULL;
1161 mutex->level = 0;
1162}
1163
1164static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
1165{
1166 if (mutex_trylock(&mutex->mutex)) {
1167 /* The mutex was unlocked before, so it's ours now */
1168 mutex->owner = current;
1169 } else if (mutex->owner != current) {
1170 /* Another process locked the mutex, take it */
1171 mutex_lock(&mutex->mutex);
1172 mutex->owner = current;
1173 }
1174 /* Otherwise the mutex was already locked by this process */
1175
1176 mutex->level++;
1177}
1178
1179static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
1180{
1181 if (--mutex->level > 0)
1182 return;
1183
1184 mutex->owner = NULL;
1185 mutex_unlock(&mutex->mutex);
1186}
1187
1188
1144/* 1189/*
1145 * Core structure, functions and helpers. 1190 * Core structure, functions and helpers.
1146 */ 1191 */
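
The radeon_mutex helpers added above let a process re-take a lock it already holds, tracking an owner and a nesting level on top of a plain kernel mutex; the cs_mutex in struct radeon_device is converted to this type in the next hunk. A userspace analogue using pthreads (this illustrates the locking pattern only, it is not the kernel API):

/* Recursive-lock pattern modelled with pthreads; mirrors the logic above. */
#include <pthread.h>
#include <stdio.h>

struct rmutex {
	pthread_mutex_t mutex;
	pthread_t owner;
	int has_owner;
	int level;
};

static void rmutex_lock(struct rmutex *m)
{
	if (pthread_mutex_trylock(&m->mutex) == 0) {
		m->owner = pthread_self();	/* was unlocked, it's ours now */
		m->has_owner = 1;
	} else if (!m->has_owner || !pthread_equal(m->owner, pthread_self())) {
		pthread_mutex_lock(&m->mutex);	/* held by someone else, wait */
		m->owner = pthread_self();
		m->has_owner = 1;
	}
	/* otherwise we already own it and only bump the nesting level */
	m->level++;
}

static void rmutex_unlock(struct rmutex *m)
{
	if (--m->level > 0)
		return;			/* still nested, keep holding it */
	m->has_owner = 0;
	pthread_mutex_unlock(&m->mutex);
}

int main(void)
{
	struct rmutex m = { .mutex = PTHREAD_MUTEX_INITIALIZER };

	rmutex_lock(&m);
	rmutex_lock(&m);		/* recursive acquisition, level == 2 */
	printf("nesting level: %d\n", m.level);
	rmutex_unlock(&m);
	rmutex_unlock(&m);
	return 0;
}

As in the kernel version, the owner comparison happens without the lock held; it is safe because the owner can only equal the current task if that task set it while holding the lock.
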
@@ -1196,7 +1241,7 @@ struct radeon_device {
1196 struct radeon_gem gem; 1241 struct radeon_gem gem;
1197 struct radeon_pm pm; 1242 struct radeon_pm pm;
1198 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; 1243 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
1199 struct mutex cs_mutex; 1244 struct radeon_mutex cs_mutex;
1200 struct radeon_wb wb; 1245 struct radeon_wb wb;
1201 struct radeon_dummy_page dummy_page; 1246 struct radeon_dummy_page dummy_page;
1202 bool gpu_lockup; 1247 bool gpu_lockup;
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 3f6636bb2d7f..3516a6081dcf 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
35 35
36 /* Fail only if calling the method fails and ATIF is supported */ 36 /* Fail only if calling the method fails and ATIF is supported */
37 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 37 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
38 printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status)); 38 DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
39 acpi_format_exception(status));
39 kfree(buffer.pointer); 40 kfree(buffer.pointer);
40 return 1; 41 return 1;
41 } 42 }
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
50 acpi_handle handle; 51 acpi_handle handle;
51 int ret; 52 int ret;
52 53
53 /* No need to proceed if we're sure that ATIF is not supported */
54 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
55 return 0;
56
57 /* Get the device handle */ 54 /* Get the device handle */
58 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); 55 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
59 56
57 /* No need to proceed if we're sure that ATIF is not supported */
58 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
59 return 0;
60
60 /* Call the ATIF method */ 61 /* Call the ATIF method */
61 ret = radeon_atif_call(handle); 62 ret = radeon_atif_call(handle);
62 if (ret) 63 if (ret)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e2944566ffea..a2e1eae114ef 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -834,7 +834,7 @@ static struct radeon_asic sumo_asic = {
834 .pm_misc = &evergreen_pm_misc, 834 .pm_misc = &evergreen_pm_misc,
835 .pm_prepare = &evergreen_pm_prepare, 835 .pm_prepare = &evergreen_pm_prepare,
836 .pm_finish = &evergreen_pm_finish, 836 .pm_finish = &evergreen_pm_finish,
837 .pm_init_profile = &rs780_pm_init_profile, 837 .pm_init_profile = &sumo_pm_init_profile,
838 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 838 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
839 .pre_page_flip = &evergreen_pre_page_flip, 839 .pre_page_flip = &evergreen_pre_page_flip,
840 .page_flip = &evergreen_page_flip, 840 .page_flip = &evergreen_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 85f14f0337e4..59914842a729 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -413,6 +413,7 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
413extern void evergreen_pm_misc(struct radeon_device *rdev); 413extern void evergreen_pm_misc(struct radeon_device *rdev);
414extern void evergreen_pm_prepare(struct radeon_device *rdev); 414extern void evergreen_pm_prepare(struct radeon_device *rdev);
415extern void evergreen_pm_finish(struct radeon_device *rdev); 415extern void evergreen_pm_finish(struct radeon_device *rdev);
416extern void sumo_pm_init_profile(struct radeon_device *rdev);
416extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); 417extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
417extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 418extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
418extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); 419extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 08d0b94332e6..d24baf30efcb 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -62,6 +62,87 @@ union atom_supported_devices {
62 struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; 62 struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
63}; 63};
64 64
65static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
66 ATOM_GPIO_I2C_ASSIGMENT *gpio,
67 u8 index)
68{
69 /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
70 if ((rdev->family == CHIP_R420) ||
71 (rdev->family == CHIP_R423) ||
72 (rdev->family == CHIP_RV410)) {
73 if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
74 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
75 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
76 gpio->ucClkMaskShift = 0x19;
77 gpio->ucDataMaskShift = 0x18;
78 }
79 }
80
81 /* some evergreen boards have bad data for this entry */
82 if (ASIC_IS_DCE4(rdev)) {
83 if ((index == 7) &&
84 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
85 (gpio->sucI2cId.ucAccess == 0)) {
86 gpio->sucI2cId.ucAccess = 0x97;
87 gpio->ucDataMaskShift = 8;
88 gpio->ucDataEnShift = 8;
89 gpio->ucDataY_Shift = 8;
90 gpio->ucDataA_Shift = 8;
91 }
92 }
93
94 /* some DCE3 boards have bad data for this entry */
95 if (ASIC_IS_DCE3(rdev)) {
96 if ((index == 4) &&
97 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
98 (gpio->sucI2cId.ucAccess == 0x94))
99 gpio->sucI2cId.ucAccess = 0x14;
100 }
101}
102
103static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
104{
105 struct radeon_i2c_bus_rec i2c;
106
107 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
108
109 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
110 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
111 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
112 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
113 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
114 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
115 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
116 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
117 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
118 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
119 i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
120 i2c.en_data_mask = (1 << gpio->ucDataEnShift);
121 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
122 i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
123 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
124 i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
125
126 if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
127 i2c.hw_capable = true;
128 else
129 i2c.hw_capable = false;
130
131 if (gpio->sucI2cId.ucAccess == 0xa0)
132 i2c.mm_i2c = true;
133 else
134 i2c.mm_i2c = false;
135
136 i2c.i2c_id = gpio->sucI2cId.ucAccess;
137
138 if (i2c.mask_clk_reg)
139 i2c.valid = true;
140 else
141 i2c.valid = false;
142
143 return i2c;
144}
145
65static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, 146static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
66 uint8_t id) 147 uint8_t id)
67{ 148{
@@ -85,59 +166,10 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
85 for (i = 0; i < num_indices; i++) { 166 for (i = 0; i < num_indices; i++) {
86 gpio = &i2c_info->asGPIO_Info[i]; 167 gpio = &i2c_info->asGPIO_Info[i];
87 168
88 /* some evergreen boards have bad data for this entry */ 169 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
89 if (ASIC_IS_DCE4(rdev)) {
90 if ((i == 7) &&
91 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
92 (gpio->sucI2cId.ucAccess == 0)) {
93 gpio->sucI2cId.ucAccess = 0x97;
94 gpio->ucDataMaskShift = 8;
95 gpio->ucDataEnShift = 8;
96 gpio->ucDataY_Shift = 8;
97 gpio->ucDataA_Shift = 8;
98 }
99 }
100
101 /* some DCE3 boards have bad data for this entry */
102 if (ASIC_IS_DCE3(rdev)) {
103 if ((i == 4) &&
104 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
105 (gpio->sucI2cId.ucAccess == 0x94))
106 gpio->sucI2cId.ucAccess = 0x14;
107 }
108 170
109 if (gpio->sucI2cId.ucAccess == id) { 171 if (gpio->sucI2cId.ucAccess == id) {
110 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 172 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
111 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
112 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
113 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
114 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
115 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
116 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
117 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
118 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
119 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
120 i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
121 i2c.en_data_mask = (1 << gpio->ucDataEnShift);
122 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
123 i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
124 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
125 i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
126
127 if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
128 i2c.hw_capable = true;
129 else
130 i2c.hw_capable = false;
131
132 if (gpio->sucI2cId.ucAccess == 0xa0)
133 i2c.mm_i2c = true;
134 else
135 i2c.mm_i2c = false;
136
137 i2c.i2c_id = gpio->sucI2cId.ucAccess;
138
139 if (i2c.mask_clk_reg)
140 i2c.valid = true;
141 break; 173 break;
142 } 174 }
143 } 175 }
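Note: the refactor above moves the board quirks into radeon_lookup_i2c_gpio_quirks() and the table-entry decode into radeon_get_bus_rec_for_i2c_gpio(), so the lookup loop reduces to quirk-then-convert. The decode itself is mechanical: register indices in the ATOM table are in units of 32-bit registers and pin positions are shift counts. A standalone sketch of that arithmetic; the values are invented, the field roles mirror the hunk.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the "index * 4" and "1 << shift" decode used by
 * radeon_get_bus_rec_for_i2c_gpio() above. */
int main(void)
{
        uint16_t clk_mask_reg_index = 0x1936;   /* little-endian in the BIOS table */
        uint8_t  clk_mask_shift     = 0;

        uint32_t clk_mask_reg  = (uint32_t)clk_mask_reg_index * 4; /* byte offset */
        uint32_t clk_mask_mask = 1u << clk_mask_shift;             /* pin bit */

        printf("reg 0x%x mask 0x%x\n", clk_mask_reg, clk_mask_mask);
        return 0;
}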
@@ -157,8 +189,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
157 int i, num_indices; 189 int i, num_indices;
158 char stmp[32]; 190 char stmp[32];
159 191
160 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
161
162 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { 192 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
163 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); 193 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
164 194
@@ -167,60 +197,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
167 197
168 for (i = 0; i < num_indices; i++) { 198 for (i = 0; i < num_indices; i++) {
169 gpio = &i2c_info->asGPIO_Info[i]; 199 gpio = &i2c_info->asGPIO_Info[i];
170 i2c.valid = false;
171
172 /* some evergreen boards have bad data for this entry */
173 if (ASIC_IS_DCE4(rdev)) {
174 if ((i == 7) &&
175 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
176 (gpio->sucI2cId.ucAccess == 0)) {
177 gpio->sucI2cId.ucAccess = 0x97;
178 gpio->ucDataMaskShift = 8;
179 gpio->ucDataEnShift = 8;
180 gpio->ucDataY_Shift = 8;
181 gpio->ucDataA_Shift = 8;
182 }
183 }
184 200
185 /* some DCE3 boards have bad data for this entry */ 201 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
186 if (ASIC_IS_DCE3(rdev)) {
187 if ((i == 4) &&
188 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
189 (gpio->sucI2cId.ucAccess == 0x94))
190 gpio->sucI2cId.ucAccess = 0x14;
191 }
192 202
193 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 203 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
194 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
195 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
196 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
197 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
198 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
199 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
200 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
201 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
202 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
203 i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
204 i2c.en_data_mask = (1 << gpio->ucDataEnShift);
205 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
206 i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
207 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
208 i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
209
210 if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
211 i2c.hw_capable = true;
212 else
213 i2c.hw_capable = false;
214
215 if (gpio->sucI2cId.ucAccess == 0xa0)
216 i2c.mm_i2c = true;
217 else
218 i2c.mm_i2c = false;
219 204
220 i2c.i2c_id = gpio->sucI2cId.ucAccess; 205 if (i2c.valid) {
221
222 if (i2c.mask_clk_reg) {
223 i2c.valid = true;
224 sprintf(stmp, "0x%x", i2c.i2c_id); 206 sprintf(stmp, "0x%x", i2c.i2c_id);
225 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); 207 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
226 } 208 }
@@ -1996,10 +1978,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
1996 return state_index; 1978 return state_index;
1997 /* last mode is usually default, array is low to high */ 1979 /* last mode is usually default, array is low to high */
1998 for (i = 0; i < num_modes; i++) { 1980 for (i = 0; i < num_modes; i++) {
1981 rdev->pm.power_state[state_index].clock_info =
1982 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
1983 if (!rdev->pm.power_state[state_index].clock_info)
1984 return state_index;
1985 rdev->pm.power_state[state_index].num_clock_modes = 1;
1999 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 1986 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2000 switch (frev) { 1987 switch (frev) {
2001 case 1: 1988 case 1:
2002 rdev->pm.power_state[state_index].num_clock_modes = 1;
2003 rdev->pm.power_state[state_index].clock_info[0].mclk = 1989 rdev->pm.power_state[state_index].clock_info[0].mclk =
2004 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); 1990 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
2005 rdev->pm.power_state[state_index].clock_info[0].sclk = 1991 rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2035,7 +2021,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2035 state_index++; 2021 state_index++;
2036 break; 2022 break;
2037 case 2: 2023 case 2:
2038 rdev->pm.power_state[state_index].num_clock_modes = 1;
2039 rdev->pm.power_state[state_index].clock_info[0].mclk = 2024 rdev->pm.power_state[state_index].clock_info[0].mclk =
2040 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); 2025 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
2041 rdev->pm.power_state[state_index].clock_info[0].sclk = 2026 rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2072,7 +2057,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2072 state_index++; 2057 state_index++;
2073 break; 2058 break;
2074 case 3: 2059 case 3:
2075 rdev->pm.power_state[state_index].num_clock_modes = 1;
2076 rdev->pm.power_state[state_index].clock_info[0].mclk = 2060 rdev->pm.power_state[state_index].clock_info[0].mclk =
2077 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); 2061 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
2078 rdev->pm.power_state[state_index].clock_info[0].sclk = 2062 rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2257,7 +2241,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
2257 rdev->pm.default_power_state_index = state_index; 2241 rdev->pm.default_power_state_index = state_index;
2258 rdev->pm.power_state[state_index].default_clock_mode = 2242 rdev->pm.power_state[state_index].default_clock_mode =
2259 &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; 2243 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
2260 if (ASIC_IS_DCE5(rdev)) { 2244 if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
2261 /* NI chips post without MC ucode, so default clocks are strobe mode only */ 2245 /* NI chips post without MC ucode, so default clocks are strobe mode only */
2262 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; 2246 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
2263 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; 2247 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2377,17 +2361,31 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
2377 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + 2361 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
2378 (power_state->v1.ucNonClockStateIndex * 2362 (power_state->v1.ucNonClockStateIndex *
2379 power_info->pplib.ucNonClockSize)); 2363 power_info->pplib.ucNonClockSize));
2380 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { 2364 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
2381 clock_info = (union pplib_clock_info *) 2365 ((power_info->pplib.ucStateEntrySize - 1) ?
2382 (mode_info->atom_context->bios + data_offset + 2366 (power_info->pplib.ucStateEntrySize - 1) : 1),
2383 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + 2367 GFP_KERNEL);
2384 (power_state->v1.ucClockStateIndices[j] * 2368 if (!rdev->pm.power_state[i].clock_info)
2385 power_info->pplib.ucClockInfoSize)); 2369 return state_index;
2386 valid = radeon_atombios_parse_pplib_clock_info(rdev, 2370 if (power_info->pplib.ucStateEntrySize - 1) {
2387 state_index, mode_index, 2371 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
2388 clock_info); 2372 clock_info = (union pplib_clock_info *)
2389 if (valid) 2373 (mode_info->atom_context->bios + data_offset +
2390 mode_index++; 2374 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
2375 (power_state->v1.ucClockStateIndices[j] *
2376 power_info->pplib.ucClockInfoSize));
2377 valid = radeon_atombios_parse_pplib_clock_info(rdev,
2378 state_index, mode_index,
2379 clock_info);
2380 if (valid)
2381 mode_index++;
2382 }
2383 } else {
2384 rdev->pm.power_state[state_index].clock_info[0].mclk =
2385 rdev->clock.default_mclk;
2386 rdev->pm.power_state[state_index].clock_info[0].sclk =
2387 rdev->clock.default_sclk;
2388 mode_index++;
2391 } 2389 }
2392 rdev->pm.power_state[state_index].num_clock_modes = mode_index; 2390 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
2393 if (mode_index) { 2391 if (mode_index) {
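Note: the power-table parsers now allocate clock_info per power state (at least one entry even when the table reports none) and fall back to the BIOS default clocks when there are no DPM levels to parse. A compact sketch of that allocate-or-fallback pattern, with simplified placeholder types standing in for the radeon structures.

#include <stdlib.h>

struct clock_info { unsigned int sclk, mclk; };

/* Sketch: "levels" plays the role of ucStateEntrySize - 1 / ucNumDPMLevels;
 * default_sclk/default_mclk stand in for rdev->clock.default_*. */
static struct clock_info *alloc_clock_modes(unsigned int levels,
                                            unsigned int default_sclk,
                                            unsigned int default_mclk,
                                            unsigned int *num_modes)
{
        unsigned int n = levels ? levels : 1;
        struct clock_info *ci = calloc(n, sizeof(*ci));

        if (!ci)
                return NULL;            /* caller bails out, as in the hunk */
        if (!levels) {
                /* no per-state entries: report one mode at the defaults */
                ci[0].sclk = default_sclk;
                ci[0].mclk = default_mclk;
                *num_modes = 1;
        } else {
                *num_modes = 0;         /* caller fills entries as it parses them */
        }
        return ci;
}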
@@ -2456,18 +2454,32 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2456 non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ 2454 non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
2457 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2455 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2458 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2456 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2459 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2457 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
2460 clock_array_index = power_state->v2.clockInfoIndex[j]; 2458 (power_state->v2.ucNumDPMLevels ?
2461 /* XXX this might be an inagua bug... */ 2459 power_state->v2.ucNumDPMLevels : 1),
2462 if (clock_array_index >= clock_info_array->ucNumEntries) 2460 GFP_KERNEL);
2463 continue; 2461 if (!rdev->pm.power_state[i].clock_info)
2464 clock_info = (union pplib_clock_info *) 2462 return state_index;
2465 &clock_info_array->clockInfo[clock_array_index]; 2463 if (power_state->v2.ucNumDPMLevels) {
2466 valid = radeon_atombios_parse_pplib_clock_info(rdev, 2464 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2467 state_index, mode_index, 2465 clock_array_index = power_state->v2.clockInfoIndex[j];
2468 clock_info); 2466 /* XXX this might be an inagua bug... */
2469 if (valid) 2467 if (clock_array_index >= clock_info_array->ucNumEntries)
2470 mode_index++; 2468 continue;
2469 clock_info = (union pplib_clock_info *)
2470 &clock_info_array->clockInfo[clock_array_index];
2471 valid = radeon_atombios_parse_pplib_clock_info(rdev,
2472 state_index, mode_index,
2473 clock_info);
2474 if (valid)
2475 mode_index++;
2476 }
2477 } else {
2478 rdev->pm.power_state[state_index].clock_info[0].mclk =
2479 rdev->clock.default_mclk;
2480 rdev->pm.power_state[state_index].clock_info[0].sclk =
2481 rdev->clock.default_sclk;
2482 mode_index++;
2471 } 2483 }
2472 rdev->pm.power_state[state_index].num_clock_modes = mode_index; 2484 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
2473 if (mode_index) { 2485 if (mode_index) {
@@ -2524,19 +2536,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2524 } else { 2536 } else {
2525 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); 2537 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
2526 if (rdev->pm.power_state) { 2538 if (rdev->pm.power_state) {
2527 /* add the default mode */ 2539 rdev->pm.power_state[0].clock_info =
2528 rdev->pm.power_state[state_index].type = 2540 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2529 POWER_STATE_TYPE_DEFAULT; 2541 if (rdev->pm.power_state[0].clock_info) {
2530 rdev->pm.power_state[state_index].num_clock_modes = 1; 2542 /* add the default mode */
2531 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; 2543 rdev->pm.power_state[state_index].type =
2532 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; 2544 POWER_STATE_TYPE_DEFAULT;
2533 rdev->pm.power_state[state_index].default_clock_mode = 2545 rdev->pm.power_state[state_index].num_clock_modes = 1;
2534 &rdev->pm.power_state[state_index].clock_info[0]; 2546 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
2535 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 2547 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
2536 rdev->pm.power_state[state_index].pcie_lanes = 16; 2548 rdev->pm.power_state[state_index].default_clock_mode =
2537 rdev->pm.default_power_state_index = state_index; 2549 &rdev->pm.power_state[state_index].clock_info[0];
2538 rdev->pm.power_state[state_index].flags = 0; 2550 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2539 state_index++; 2551 rdev->pm.power_state[state_index].pcie_lanes = 16;
2552 rdev->pm.default_power_state_index = state_index;
2553 rdev->pm.power_state[state_index].flags = 0;
2554 state_index++;
2555 }
2540 } 2556 }
2541 } 2557 }
2542 2558
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 5cafc90de7f8..17e1a9b2d8fb 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -98,7 +98,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
98 struct radeon_bo *sobj = NULL; 98 struct radeon_bo *sobj = NULL;
99 uint64_t saddr, daddr; 99 uint64_t saddr, daddr;
100 int r, n; 100 int r, n;
101 unsigned int time; 101 int time;
102 102
103 n = RADEON_BENCHMARK_ITERATIONS; 103 n = RADEON_BENCHMARK_ITERATIONS;
104 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); 104 r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 8bf83c4b4147..81fc100be7e1 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2563,14 +2563,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
2563 2563
2564 /* allocate 2 power states */ 2564 /* allocate 2 power states */
2565 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); 2565 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
2566 if (!rdev->pm.power_state) { 2566 if (rdev->pm.power_state) {
2567 rdev->pm.default_power_state_index = state_index; 2567 /* allocate 1 clock mode per state */
2568 rdev->pm.num_power_states = 0; 2568 rdev->pm.power_state[0].clock_info =
2569 2569 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2570 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; 2570 rdev->pm.power_state[1].clock_info =
2571 rdev->pm.current_clock_mode_index = 0; 2571 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2572 return; 2572 if (!rdev->pm.power_state[0].clock_info ||
2573 } 2573 !rdev->pm.power_state[1].clock_info)
2574 goto pm_failed;
2575 } else
2576 goto pm_failed;
2574 2577
2575 /* check for a thermal chip */ 2578 /* check for a thermal chip */
2576 offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); 2579 offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
@@ -2735,6 +2738,14 @@ default_mode:
2735 2738
2736 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; 2739 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
2737 rdev->pm.current_clock_mode_index = 0; 2740 rdev->pm.current_clock_mode_index = 0;
2741 return;
2742
2743pm_failed:
2744 rdev->pm.default_power_state_index = state_index;
2745 rdev->pm.num_power_states = 0;
2746
2747 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
2748 rdev->pm.current_clock_mode_index = 0;
2738} 2749}
2739 2750
2740void radeon_external_tmds_setup(struct drm_encoder *encoder) 2751void radeon_external_tmds_setup(struct drm_encoder *encoder)
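Note: radeon_combios_get_power_modes() above now allocates two power states plus one clock_info each up front, and routes every allocation failure through a single pm_failed label that leaves pm bookkeeping in a safe "no power states" configuration. The same shape in miniature, with placeholder names; the driver itself only records zero power states on failure, the explicit frees here are for the standalone example.

#include <stdlib.h>

struct state { void *clock_info; };

/* Sketch of the allocate-both-or-bail pattern from this hunk. */
static int setup_two_states(struct state **out)
{
        struct state *ps = calloc(2, sizeof(*ps));

        if (!ps)
                goto fail;
        ps[0].clock_info = calloc(1, 64);
        ps[1].clock_info = calloc(1, 64);
        if (!ps[0].clock_info || !ps[1].clock_info)
                goto fail;
        *out = ps;
        return 0;

fail:
        if (ps) {
                free(ps[0].clock_info);
                free(ps[1].clock_info);
                free(ps);
        }
        *out = NULL;
        return -1;      /* caller falls back to "0 power states" */
}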
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index fae00c0d75aa..29afd71e0840 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -93,7 +93,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
93{ 93{
94 struct drm_radeon_cs *cs = data; 94 struct drm_radeon_cs *cs = data;
95 uint64_t *chunk_array_ptr; 95 uint64_t *chunk_array_ptr;
96 unsigned size, i; 96 unsigned size, i, flags = 0;
97 97
98 if (!cs->num_chunks) { 98 if (!cs->num_chunks) {
99 return 0; 99 return 0;
@@ -140,6 +140,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
140 if (p->chunks[i].length_dw == 0) 140 if (p->chunks[i].length_dw == 0)
141 return -EINVAL; 141 return -EINVAL;
142 } 142 }
143 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
144 !p->chunks[i].length_dw) {
145 return -EINVAL;
146 }
143 147
144 p->chunks[i].length_dw = user_chunk.length_dw; 148 p->chunks[i].length_dw = user_chunk.length_dw;
145 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; 149 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -155,6 +159,9 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
155 p->chunks[i].user_ptr, size)) { 159 p->chunks[i].user_ptr, size)) {
156 return -EFAULT; 160 return -EFAULT;
157 } 161 }
162 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
163 flags = p->chunks[i].kdata[0];
164 }
158 } else { 165 } else {
159 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); 166 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
160 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); 167 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -174,6 +181,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
174 p->chunks[p->chunk_ib_idx].length_dw); 181 p->chunks[p->chunk_ib_idx].length_dw);
175 return -EINVAL; 182 return -EINVAL;
176 } 183 }
184
185 p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
177 return 0; 186 return 0;
178} 187}
179 188
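Note: radeon_cs_parser_init() now recognizes a RADEON_CHUNK_ID_FLAGS chunk: an empty flags chunk is rejected, the first dword is read once the data has been copied in, and RADEON_CS_KEEP_TILING_FLAGS from it sets keep_tiling_flags. A reduced sketch of that "find the flags chunk, pull one dword" step; the chunk layout is simplified and the constant values are assumptions for illustration.

#include <stdint.h>
#include <stddef.h>

#define RADEON_CHUNK_ID_FLAGS           0x03    /* value assumed for illustration */
#define RADEON_CS_KEEP_TILING_FLAGS     0x01    /* value assumed for illustration */

struct chunk {
        unsigned int id;
        unsigned int length_dw;
        const uint32_t *kdata;  /* already copied from user space */
};

/* Sketch: fail on an empty flags chunk (the hunk returns -EINVAL there),
 * otherwise report whether userspace asked to keep its tiling flags. */
static int parse_flags_chunk(const struct chunk *c, size_t n, int *keep_tiling)
{
        size_t i;

        *keep_tiling = 0;
        for (i = 0; i < n; i++) {
                if (c[i].id != RADEON_CHUNK_ID_FLAGS)
                        continue;
                if (!c[i].length_dw)
                        return -1;
                *keep_tiling = (c[i].kdata[0] & RADEON_CS_KEEP_TILING_FLAGS) != 0;
        }
        return 0;
}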
@@ -222,7 +231,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
222 struct radeon_cs_chunk *ib_chunk; 231 struct radeon_cs_chunk *ib_chunk;
223 int r; 232 int r;
224 233
225 mutex_lock(&rdev->cs_mutex); 234 radeon_mutex_lock(&rdev->cs_mutex);
226 /* initialize parser */ 235 /* initialize parser */
227 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 236 memset(&parser, 0, sizeof(struct radeon_cs_parser));
228 parser.filp = filp; 237 parser.filp = filp;
@@ -233,14 +242,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
233 if (r) { 242 if (r) {
234 DRM_ERROR("Failed to initialize parser !\n"); 243 DRM_ERROR("Failed to initialize parser !\n");
235 radeon_cs_parser_fini(&parser, r); 244 radeon_cs_parser_fini(&parser, r);
236 mutex_unlock(&rdev->cs_mutex); 245 radeon_mutex_unlock(&rdev->cs_mutex);
237 return r; 246 return r;
238 } 247 }
239 r = radeon_ib_get(rdev, &parser.ib); 248 r = radeon_ib_get(rdev, &parser.ib);
240 if (r) { 249 if (r) {
241 DRM_ERROR("Failed to get ib !\n"); 250 DRM_ERROR("Failed to get ib !\n");
242 radeon_cs_parser_fini(&parser, r); 251 radeon_cs_parser_fini(&parser, r);
243 mutex_unlock(&rdev->cs_mutex); 252 radeon_mutex_unlock(&rdev->cs_mutex);
244 return r; 253 return r;
245 } 254 }
246 r = radeon_cs_parser_relocs(&parser); 255 r = radeon_cs_parser_relocs(&parser);
@@ -248,7 +257,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
248 if (r != -ERESTARTSYS) 257 if (r != -ERESTARTSYS)
249 DRM_ERROR("Failed to parse relocation %d!\n", r); 258 DRM_ERROR("Failed to parse relocation %d!\n", r);
250 radeon_cs_parser_fini(&parser, r); 259 radeon_cs_parser_fini(&parser, r);
251 mutex_unlock(&rdev->cs_mutex); 260 radeon_mutex_unlock(&rdev->cs_mutex);
252 return r; 261 return r;
253 } 262 }
254 /* Copy the packet into the IB, the parser will read from the 263 /* Copy the packet into the IB, the parser will read from the
@@ -260,14 +269,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
260 if (r || parser.parser_error) { 269 if (r || parser.parser_error) {
261 DRM_ERROR("Invalid command stream !\n"); 270 DRM_ERROR("Invalid command stream !\n");
262 radeon_cs_parser_fini(&parser, r); 271 radeon_cs_parser_fini(&parser, r);
263 mutex_unlock(&rdev->cs_mutex); 272 radeon_mutex_unlock(&rdev->cs_mutex);
264 return r; 273 return r;
265 } 274 }
266 r = radeon_cs_finish_pages(&parser); 275 r = radeon_cs_finish_pages(&parser);
267 if (r) { 276 if (r) {
268 DRM_ERROR("Invalid command stream !\n"); 277 DRM_ERROR("Invalid command stream !\n");
269 radeon_cs_parser_fini(&parser, r); 278 radeon_cs_parser_fini(&parser, r);
270 mutex_unlock(&rdev->cs_mutex); 279 radeon_mutex_unlock(&rdev->cs_mutex);
271 return r; 280 return r;
272 } 281 }
273 r = radeon_ib_schedule(rdev, parser.ib); 282 r = radeon_ib_schedule(rdev, parser.ib);
@@ -275,7 +284,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
275 DRM_ERROR("Failed to schedule IB !\n"); 284 DRM_ERROR("Failed to schedule IB !\n");
276 } 285 }
277 radeon_cs_parser_fini(&parser, r); 286 radeon_cs_parser_fini(&parser, r);
278 mutex_unlock(&rdev->cs_mutex); 287 radeon_mutex_unlock(&rdev->cs_mutex);
279 return r; 288 return r;
280} 289}
281 290
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7c31321df45b..fb347a80486f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev,
716 716
717 /* mutex initialization are all done here so we 717 /* mutex initialization are all done here so we
718 * can recall function without having locking issues */ 718 * can recall function without having locking issues */
719 mutex_init(&rdev->cs_mutex); 719 radeon_mutex_init(&rdev->cs_mutex);
720 mutex_init(&rdev->ib_pool.mutex); 720 mutex_init(&rdev->ib_pool.mutex);
721 mutex_init(&rdev->cp.mutex); 721 mutex_init(&rdev->cp.mutex);
722 mutex_init(&rdev->dc_hw_i2c_mutex); 722 mutex_init(&rdev->dc_hw_i2c_mutex);
@@ -961,6 +961,9 @@ int radeon_gpu_reset(struct radeon_device *rdev)
961 int r; 961 int r;
962 int resched; 962 int resched;
963 963
964 /* Prevent CS ioctl from interfering */
965 radeon_mutex_lock(&rdev->cs_mutex);
966
964 radeon_save_bios_scratch_regs(rdev); 967 radeon_save_bios_scratch_regs(rdev);
965 /* block TTM */ 968 /* block TTM */
966 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 969 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -973,10 +976,15 @@ int radeon_gpu_reset(struct radeon_device *rdev)
973 radeon_restore_bios_scratch_regs(rdev); 976 radeon_restore_bios_scratch_regs(rdev);
974 drm_helper_resume_force_mode(rdev->ddev); 977 drm_helper_resume_force_mode(rdev->ddev);
975 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 978 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
976 return 0;
977 } 979 }
978 /* bad news, how to tell it to userspace ? */ 980
979 dev_info(rdev->dev, "GPU reset failed\n"); 981 radeon_mutex_unlock(&rdev->cs_mutex);
982
983 if (r) {
984 /* bad news, how to tell it to userspace ? */
985 dev_info(rdev->dev, "GPU reset failed\n");
986 }
987
980 return r; 988 return r;
981} 989}
982 990
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e42c34b98c7b..c3ef1d266f88 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -53,9 +53,10 @@
53 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query 53 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
54 * 2.10.0 - fusion 2D tiling 54 * 2.10.0 - fusion 2D tiling
55 * 2.11.0 - backend map, initial compute support for the CS checker 55 * 2.11.0 - backend map, initial compute support for the CS checker
56 * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
56 */ 57 */
57#define KMS_DRIVER_MAJOR 2 58#define KMS_DRIVER_MAJOR 2
58#define KMS_DRIVER_MINOR 11 59#define KMS_DRIVER_MINOR 12
59#define KMS_DRIVER_PATCHLEVEL 0 60#define KMS_DRIVER_PATCHLEVEL 0
60int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 61int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
61int radeon_driver_unload_kms(struct drm_device *dev); 62int radeon_driver_unload_kms(struct drm_device *dev);
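Note: the KMS minor version is bumped to 2.12 so userspace can detect RADEON_CS_KEEP_TILING_FLAGS support before relying on it. A hedged userspace sketch using libdrm's drmGetVersion(); the "minor >= 12" gate is the assumption being illustrated, it is not spelled out for userspace in this diff.

#include <stdio.h>
#include <xf86drm.h>

/* Sketch: query the radeon KMS driver version and gate the new CS flag on it.
 * Build against libdrm, e.g. cc demo.c $(pkg-config --cflags --libs libdrm). */
static int supports_keep_tiling_flags(int fd)
{
        drmVersionPtr v = drmGetVersion(fd);
        int ok;

        if (!v)
                return 0;
        ok = v->version_major > 2 ||
             (v->version_major == 2 && v->version_minor >= 12);
        drmFreeVersion(v);
        return ok;
}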
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 06e413e6a920..4b27efa4405b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
233 switch (radeon_encoder->encoder_id) { 233 switch (radeon_encoder->encoder_id) {
234 case ENCODER_OBJECT_ID_TRAVIS: 234 case ENCODER_OBJECT_ID_TRAVIS:
235 case ENCODER_OBJECT_ID_NUTMEG: 235 case ENCODER_OBJECT_ID_NUTMEG:
236 return true; 236 return radeon_encoder->encoder_id;
237 default: 237 default:
238 return false; 238 return ENCODER_OBJECT_ID_NONE;
239 } 239 }
240 } 240 }
241 241 return ENCODER_OBJECT_ID_NONE;
242 return false;
243} 242}
244 243
245void radeon_panel_mode_fixup(struct drm_encoder *encoder, 244void radeon_panel_mode_fixup(struct drm_encoder *encoder,
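Note: radeon_encoder_get_dp_bridge_encoder_id() used to answer a yes/no question; it now returns the actual encoder object id (TRAVIS or NUTMEG) or ENCODER_OBJECT_ID_NONE, so a caller can both test for a DP bridge and learn which one it is. A standalone sketch of the new return convention; the numeric id values below are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define ENCODER_OBJECT_ID_NONE   0x00
#define ENCODER_OBJECT_ID_NUTMEG 0x22   /* values assumed for illustration */
#define ENCODER_OBJECT_ID_TRAVIS 0x23

/* Stand-in for the reworked helper: id of the DP bridge, or NONE. */
static uint16_t get_dp_bridge_encoder_id(uint16_t encoder_id)
{
        switch (encoder_id) {
        case ENCODER_OBJECT_ID_TRAVIS:
        case ENCODER_OBJECT_ID_NUTMEG:
                return encoder_id;
        default:
                return ENCODER_OBJECT_ID_NONE;
        }
}

int main(void)
{
        uint16_t bridge = get_dp_bridge_encoder_id(ENCODER_OBJECT_ID_NUTMEG);

        if (bridge != ENCODER_OBJECT_ID_NONE)
                printf("DP bridge present, object id 0x%02x\n", bridge);
        return 0;
}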
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 95b93604b679..25a19c483075 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -991,12 +991,6 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
991 struct drm_display_mode *mode, 991 struct drm_display_mode *mode,
992 struct drm_display_mode *adjusted_mode) 992 struct drm_display_mode *adjusted_mode)
993{ 993{
994 struct drm_device *dev = crtc->dev;
995 struct radeon_device *rdev = dev->dev_private;
996
997 /* adjust pm to upcoming mode change */
998 radeon_pm_compute_clocks(rdev);
999
1000 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 994 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
1001 return false; 995 return false;
1002 return true; 996 return true;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6fabe89fa6a1..78a665bd9519 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -53,6 +53,24 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev);
53 53
54#define ACPI_AC_CLASS "ac_adapter" 54#define ACPI_AC_CLASS "ac_adapter"
55 55
56int radeon_pm_get_type_index(struct radeon_device *rdev,
57 enum radeon_pm_state_type ps_type,
58 int instance)
59{
60 int i;
61 int found_instance = -1;
62
63 for (i = 0; i < rdev->pm.num_power_states; i++) {
64 if (rdev->pm.power_state[i].type == ps_type) {
65 found_instance++;
66 if (found_instance == instance)
67 return i;
68 }
69 }
70 /* return default if no match */
71 return rdev->pm.default_power_state_index;
72}
73
56#ifdef CONFIG_ACPI 74#ifdef CONFIG_ACPI
57static int radeon_acpi_event(struct notifier_block *nb, 75static int radeon_acpi_event(struct notifier_block *nb,
58 unsigned long val, 76 unsigned long val,
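Note: the new radeon_pm_get_type_index() walks the power-state array and returns the index of the Nth state of a given type, falling back to the default state when nothing matches. A standalone sketch of the same lookup against sample data; the types and values are placeholders, only the search logic mirrors the helper.

#include <stdio.h>

enum ps_type { PS_DEFAULT, PS_POWERSAVE, PS_BATTERY, PS_PERFORMANCE };

struct power_state { enum ps_type type; };

/* Stand-in for radeon_pm_get_type_index(): index of the Nth state of a given
 * type, or the default index when there is no match. */
static int pm_get_type_index(const struct power_state *ps, int n,
                             enum ps_type type, int instance, int def_idx)
{
        int i, found = -1;

        for (i = 0; i < n; i++) {
                if (ps[i].type == type && ++found == instance)
                        return i;
        }
        return def_idx;
}

int main(void)
{
        struct power_state states[] = {
                { PS_DEFAULT }, { PS_BATTERY }, { PS_PERFORMANCE }, { PS_PERFORMANCE },
        };

        /* second performance state -> index 3; missing type -> default (0) */
        printf("%d %d\n",
               pm_get_type_index(states, 4, PS_PERFORMANCE, 1, 0),
               pm_get_type_index(states, 4, PS_POWERSAVE,   0, 0));
        return 0;
}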
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 481b99e89f65..b1053d640423 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
62{ 62{
63 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 63 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
64 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); 64 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
65 int i;
65 66
66 /* Lock the graphics update lock */ 67 /* Lock the graphics update lock */
67 tmp |= AVIVO_D1GRPH_UPDATE_LOCK; 68 tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
74 (u32)crtc_base); 75 (u32)crtc_base);
75 76
76 /* Wait for update_pending to go high. */ 77 /* Wait for update_pending to go high. */
77 while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); 78 for (i = 0; i < rdev->usec_timeout; i++) {
79 if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
80 break;
81 udelay(1);
82 }
78 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 83 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
79 84
80 /* Unlock the lock, so double-buffering can take place inside vblank */ 85 /* Unlock the lock, so double-buffering can take place inside vblank */
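Note: the rs600 (and, below, rv770) page-flip path replaces an unbounded busy-wait on UPDATE_PENDING with a loop capped at rdev->usec_timeout iterations of udelay(1), so a wedged register can no longer hang the CPU. The general shape as a standalone sketch, with a fake register read standing in for RREG32().

#include <stdio.h>

#define UPDATE_PENDING  0x4
#define USEC_TIMEOUT    100000  /* stand-in for rdev->usec_timeout */

/* Fake MMIO read for illustration; the real code uses RREG32(). */
static unsigned int read_status(int iteration)
{
        return iteration > 10 ? UPDATE_PENDING : 0;
}

int main(void)
{
        int i;

        /* bounded poll: give up after USEC_TIMEOUT iterations instead of
         * spinning forever, matching the loop introduced in this hunk */
        for (i = 0; i < USEC_TIMEOUT; i++) {
                if (read_status(i) & UPDATE_PENDING)
                        break;
                /* udelay(1) in the kernel; nothing to wait on here */
        }
        printf("%s after %d iterations\n",
               i < USEC_TIMEOUT ? "pending" : "timed out", i);
        return 0;
}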
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a983f410ab89..23ae1c60ab3d 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
47{ 47{
48 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 48 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
49 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); 49 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
50 int i;
50 51
51 /* Lock the graphics update lock */ 52 /* Lock the graphics update lock */
52 tmp |= AVIVO_D1GRPH_UPDATE_LOCK; 53 tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
66 (u32)crtc_base); 67 (u32)crtc_base);
67 68
68 /* Wait for update_pending to go high. */ 69 /* Wait for update_pending to go high. */
69 while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); 70 for (i = 0; i < rdev->usec_timeout; i++) {
71 if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
72 break;
73 udelay(1);
74 }
70 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 75 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
71 76
72 /* Unlock the lock, so double-buffering can take place inside vblank */ 77 /* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 119b6e3ff906..2f0eab66ece6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -562,10 +562,16 @@ retry:
562 return ret; 562 return ret;
563 563
564 spin_lock(&glob->lru_lock); 564 spin_lock(&glob->lru_lock);
565
566 if (unlikely(list_empty(&bo->ddestroy))) {
567 spin_unlock(&glob->lru_lock);
568 return 0;
569 }
570
565 ret = ttm_bo_reserve_locked(bo, interruptible, 571 ret = ttm_bo_reserve_locked(bo, interruptible,
566 no_wait_reserve, false, 0); 572 no_wait_reserve, false, 0);
567 573
568 if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) { 574 if (unlikely(ret != 0)) {
569 spin_unlock(&glob->lru_lock); 575 spin_unlock(&glob->lru_lock);
570 return ret; 576 return ret;
571 } 577 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 3f6343502d1f..5ff561d4e0b4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -140,7 +140,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
140 goto out_clips; 140 goto out_clips;
141 } 141 }
142 142
143 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); 143 clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
144 if (clips == NULL) { 144 if (clips == NULL) {
145 DRM_ERROR("Failed to allocate clip rect list.\n"); 145 DRM_ERROR("Failed to allocate clip rect list.\n");
146 ret = -ENOMEM; 146 ret = -ENOMEM;
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
232 goto out_clips; 232 goto out_clips;
233 } 233 }
234 234
235 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); 235 clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
236 if (clips == NULL) { 236 if (clips == NULL) {
237 DRM_ERROR("Failed to allocate clip rect list.\n"); 237 DRM_ERROR("Failed to allocate clip rect list.\n");
238 ret = -ENOMEM; 238 ret = -ENOMEM;
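Note: both vmwgfx ioctls switch from kzalloc(num * size) to kcalloc(num, size). With a user-supplied num_clips the multiplication can wrap on a 32-bit size_t and yield an undersized buffer; kcalloc performs a checked multiplication and fails the allocation instead. A userspace illustration of the wrap being defended against, with calloc playing the role of kcalloc.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct clip { uint32_t x1, y1, x2, y2; };       /* 16 bytes */

int main(void)
{
        uint32_t num_clips = 0x10000001;        /* hostile-looking value */

        /* naive sizing wraps around when done in 32-bit arithmetic */
        uint32_t bad_bytes = num_clips * (uint32_t)sizeof(struct clip);
        printf("wrapped size: %u bytes for %u clips\n", bad_bytes, num_clips);

        /* kcalloc (and calloc) compute the product with overflow checking,
         * so the worst case is a failed allocation, never a short buffer */
        void *p = calloc(num_clips, sizeof(struct clip));
        printf("calloc: %s\n", p ? "allocated the full size" : "failed cleanly");
        free(p);
        return 0;
}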
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 0585987f2945..1748a7142aca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -105,6 +105,10 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
105 struct vmw_dma_buffer *dmabuf = NULL; 105 struct vmw_dma_buffer *dmabuf = NULL;
106 int ret; 106 int ret;
107 107
108 /* A lot of the code assumes this */
109 if (handle && (width != 64 || height != 64))
110 return -EINVAL;
111
108 if (handle) { 112 if (handle) {
109 ret = vmw_user_surface_lookup_handle(dev_priv, tfile, 113 ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
110 handle, &surface); 114 handle, &surface);
@@ -410,8 +414,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
410 top = clips->y1; 414 top = clips->y1;
411 bottom = clips->y2; 415 bottom = clips->y2;
412 416
413 clips_ptr = clips; 417 /* skip the first clip rect */
414 for (i = 1; i < num_clips; i++, clips_ptr += inc) { 418 for (i = 1, clips_ptr = clips + inc;
419 i < num_clips; i++, clips_ptr += inc) {
415 left = min_t(int, left, (int)clips_ptr->x1); 420 left = min_t(int, left, (int)clips_ptr->x1);
416 right = max_t(int, right, (int)clips_ptr->x2); 421 right = max_t(int, right, (int)clips_ptr->x2);
417 top = min_t(int, top, (int)clips_ptr->y1); 422 top = min_t(int, top, (int)clips_ptr->y1);
@@ -1331,7 +1336,10 @@ int vmw_kms_close(struct vmw_private *dev_priv)
1331 * drm_encoder_cleanup which takes the lock we deadlock. 1336 * drm_encoder_cleanup which takes the lock we deadlock.
1332 */ 1337 */
1333 drm_mode_config_cleanup(dev_priv->dev); 1338 drm_mode_config_cleanup(dev_priv->dev);
1334 vmw_kms_close_legacy_display_system(dev_priv); 1339 if (dev_priv->sou_priv)
1340 vmw_kms_close_screen_object_display(dev_priv);
1341 else
1342 vmw_kms_close_legacy_display_system(dev_priv);
1335 return 0; 1343 return 0;
1336} 1344}
1337 1345
@@ -1809,7 +1817,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1809 } 1817 }
1810 1818
1811 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); 1819 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
1812 rects = kzalloc(rects_size, GFP_KERNEL); 1820 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
1821 GFP_KERNEL);
1813 if (unlikely(!rects)) { 1822 if (unlikely(!rects)) {
1814 ret = -ENOMEM; 1823 ret = -ENOMEM;
1815 goto out_unlock; 1824 goto out_unlock;
@@ -1824,10 +1833,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1824 } 1833 }
1825 1834
1826 for (i = 0; i < arg->num_outputs; ++i) { 1835 for (i = 0; i < arg->num_outputs; ++i) {
1827 if (rects->x < 0 || 1836 if (rects[i].x < 0 ||
1828 rects->y < 0 || 1837 rects[i].y < 0 ||
1829 rects->x + rects->w > mode_config->max_width || 1838 rects[i].x + rects[i].w > mode_config->max_width ||
1830 rects->y + rects->h > mode_config->max_height) { 1839 rects[i].y + rects[i].h > mode_config->max_height) {
1831 DRM_ERROR("Invalid GUI layout.\n"); 1840 DRM_ERROR("Invalid GUI layout.\n");
1832 ret = -EINVAL; 1841 ret = -EINVAL;
1833 goto out_free; 1842 goto out_free;
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c72f1c0b5e63..111d956d8e7d 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -465,31 +465,29 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
465 while (new_bus) { 465 while (new_bus) {
466 new_bridge = new_bus->self; 466 new_bridge = new_bus->self;
467 467
468 if (new_bridge) { 468 /* go through list of devices already registered */
469 /* go through list of devices already registered */ 469 list_for_each_entry(same_bridge_vgadev, &vga_list, list) {
470 list_for_each_entry(same_bridge_vgadev, &vga_list, list) { 470 bus = same_bridge_vgadev->pdev->bus;
471 bus = same_bridge_vgadev->pdev->bus; 471 bridge = bus->self;
472 bridge = bus->self; 472
473 473 /* see if the share a bridge with this device */
474 /* see if the share a bridge with this device */ 474 if (new_bridge == bridge) {
475 if (new_bridge == bridge) { 475 /* if their direct parent bridge is the same
476 /* if their direct parent bridge is the same 476 as any bridge of this device then it can't be used
477 as any bridge of this device then it can't be used 477 for that device */
478 for that device */ 478 same_bridge_vgadev->bridge_has_one_vga = false;
479 same_bridge_vgadev->bridge_has_one_vga = false; 479 }
480 }
481 480
482 /* now iterate the previous devices bridge hierarchy */ 481 /* now iterate the previous devices bridge hierarchy */
483 /* if the new devices parent bridge is in the other devices 482 /* if the new devices parent bridge is in the other devices
484 hierarchy then we can't use it to control this device */ 483 hierarchy then we can't use it to control this device */
485 while (bus) { 484 while (bus) {
486 bridge = bus->self; 485 bridge = bus->self;
487 if (bridge) { 486 if (bridge) {
488 if (bridge == vgadev->pdev->bus->self) 487 if (bridge == vgadev->pdev->bus->self)
489 vgadev->bridge_has_one_vga = false; 488 vgadev->bridge_has_one_vga = false;
490 }
491 bus = bus->parent;
492 } 489 }
490 bus = bus->parent;
493 } 491 }
494 } 492 }
495 new_bus = new_bus->parent; 493 new_bus = new_bus->parent;
@@ -993,14 +991,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
993 uc = &priv->cards[i]; 991 uc = &priv->cards[i];
994 } 992 }
995 993
996 if (!uc) 994 if (!uc) {
997 return -EINVAL; 995 ret_val = -EINVAL;
996 goto done;
997 }
998 998
999 if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) 999 if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
1000 return -EINVAL; 1000 ret_val = -EINVAL;
1001 goto done;
1002 }
1001 1003
1002 if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) 1004 if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
1003 return -EINVAL; 1005 ret_val = -EINVAL;
1006 goto done;
1007 }
1004 1008
1005 vga_put(pdev, io_state); 1009 vga_put(pdev, io_state);
1006 1010
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 848a56c0279c..af353842f75f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1771,8 +1771,8 @@ static const struct hid_device_id hid_ignore_list[] = {
1771 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, 1771 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
1772 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, 1772 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
1773 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, 1773 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
1774 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
1774 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) }, 1775 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
1775 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
1776 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) }, 1776 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
1777 { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) }, 1777 { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
1778 { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) }, 1778 { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 06ce996b8b65..4a441a6f9967 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -266,7 +266,7 @@
266#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002 266#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
267 267
268#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc 268#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
269#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001 269#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
270 270
271#define USB_VENDOR_ID_GLAB 0x06c2 271#define USB_VENDOR_ID_GLAB 0x06c2
272#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038 272#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9ec854ae118b..91be41f60809 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -315,7 +315,7 @@ config SENSORS_DS1621
315 315
316config SENSORS_EXYNOS4_TMU 316config SENSORS_EXYNOS4_TMU
317 tristate "Temperature sensor on Samsung EXYNOS4" 317 tristate "Temperature sensor on Samsung EXYNOS4"
318 depends on EXYNOS4_DEV_TMU 318 depends on ARCH_EXYNOS4
319 help 319 help
320 If you say yes here you get support for TMU (Thermal Management 320 If you say yes here you get support for TMU (Thermal Management
321 Unit) on SAMSUNG EXYNOS4 series of SoC. 321 Unit) on SAMSUNG EXYNOS4 series of SoC.
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 318e38e85376..5d760f3d21c2 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -160,7 +160,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id);
160static struct spi_driver ad7314_driver = { 160static struct spi_driver ad7314_driver = {
161 .driver = { 161 .driver = {
162 .name = "ad7314", 162 .name = "ad7314",
163 .bus = &spi_bus_type,
164 .owner = THIS_MODULE, 163 .owner = THIS_MODULE,
165 }, 164 },
166 .probe = ad7314_probe, 165 .probe = ad7314_probe,
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 52319340e182..04450f8bf5da 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -227,7 +227,6 @@ static int __devexit ads7871_remove(struct spi_device *spi)
227static struct spi_driver ads7871_driver = { 227static struct spi_driver ads7871_driver = {
228 .driver = { 228 .driver = {
229 .name = DEVICE_NAME, 229 .name = DEVICE_NAME,
230 .bus = &spi_bus_type,
231 .owner = THIS_MODULE, 230 .owner = THIS_MODULE,
232 }, 231 },
233 232
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
index faa0884f61f6..f2359a0093bd 100644
--- a/drivers/hwmon/exynos4_tmu.c
+++ b/drivers/hwmon/exynos4_tmu.c
@@ -506,17 +506,7 @@ static struct platform_driver exynos4_tmu_driver = {
506 .resume = exynos4_tmu_resume, 506 .resume = exynos4_tmu_resume,
507}; 507};
508 508
509static int __init exynos4_tmu_driver_init(void) 509module_platform_driver(exynos4_tmu_driver);
510{
511 return platform_driver_register(&exynos4_tmu_driver);
512}
513module_init(exynos4_tmu_driver_init);
514
515static void __exit exynos4_tmu_driver_exit(void)
516{
517 platform_driver_unregister(&exynos4_tmu_driver);
518}
519module_exit(exynos4_tmu_driver_exit);
520 510
521MODULE_DESCRIPTION("EXYNOS4 TMU Driver"); 511MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
522MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>"); 512MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 89aa9fb743af..9ba38f318ffb 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -539,18 +539,7 @@ static struct platform_driver gpio_fan_driver = {
539 }, 539 },
540}; 540};
541 541
542static int __init gpio_fan_init(void) 542module_platform_driver(gpio_fan_driver);
543{
544 return platform_driver_register(&gpio_fan_driver);
545}
546
547static void __exit gpio_fan_exit(void)
548{
549 platform_driver_unregister(&gpio_fan_driver);
550}
551
552module_init(gpio_fan_init);
553module_exit(gpio_fan_exit);
554 543
555MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>"); 544MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
556MODULE_DESCRIPTION("GPIO FAN driver"); 545MODULE_DESCRIPTION("GPIO FAN driver");
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index fea292d43407..5253d23361d9 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
59{ 59{
60 struct jz4740_hwmon *hwmon = dev_get_drvdata(dev); 60 struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
61 struct completion *completion = &hwmon->read_completion; 61 struct completion *completion = &hwmon->read_completion;
62 unsigned long t; 62 long t;
63 unsigned long val; 63 unsigned long val;
64 int ret; 64 int ret;
65 65
@@ -203,7 +203,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
203 return 0; 203 return 0;
204} 204}
205 205
206struct platform_driver jz4740_hwmon_driver = { 206static struct platform_driver jz4740_hwmon_driver = {
207 .probe = jz4740_hwmon_probe, 207 .probe = jz4740_hwmon_probe,
208 .remove = __devexit_p(jz4740_hwmon_remove), 208 .remove = __devexit_p(jz4740_hwmon_remove),
209 .driver = { 209 .driver = {
@@ -212,17 +212,7 @@ struct platform_driver jz4740_hwmon_driver = {
212 }, 212 },
213}; 213};
214 214
215static int __init jz4740_hwmon_init(void) 215module_platform_driver(jz4740_hwmon_driver);
216{
217 return platform_driver_register(&jz4740_hwmon_driver);
218}
219module_init(jz4740_hwmon_init);
220
221static void __exit jz4740_hwmon_exit(void)
222{
223 platform_driver_unregister(&jz4740_hwmon_driver);
224}
225module_exit(jz4740_hwmon_exit);
226 216
227MODULE_DESCRIPTION("JZ4740 SoC HWMON driver"); 217MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
228MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); 218MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index eab11615dced..9b382ec2c3bd 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -432,19 +432,7 @@ static struct platform_driver ntc_thermistor_driver = {
432 .id_table = ntc_thermistor_id, 432 .id_table = ntc_thermistor_id,
433}; 433};
434 434
435static int __init ntc_thermistor_init(void) 435module_platform_driver(ntc_thermistor_driver);
436{
437 return platform_driver_register(&ntc_thermistor_driver);
438}
439
440module_init(ntc_thermistor_init);
441
442static void __exit ntc_thermistor_cleanup(void)
443{
444 platform_driver_unregister(&ntc_thermistor_driver);
445}
446
447module_exit(ntc_thermistor_cleanup);
448 436
449MODULE_DESCRIPTION("NTC Thermistor Driver"); 437MODULE_DESCRIPTION("NTC Thermistor Driver");
450MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); 438MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index b39f52e2752a..f6c26d19f521 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -393,18 +393,7 @@ static struct platform_driver s3c_hwmon_driver = {
393 .remove = __devexit_p(s3c_hwmon_remove), 393 .remove = __devexit_p(s3c_hwmon_remove),
394}; 394};
395 395
396static int __init s3c_hwmon_init(void) 396module_platform_driver(s3c_hwmon_driver);
397{
398 return platform_driver_register(&s3c_hwmon_driver);
399}
400
401static void __exit s3c_hwmon_exit(void)
402{
403 platform_driver_unregister(&s3c_hwmon_driver);
404}
405
406module_init(s3c_hwmon_init);
407module_exit(s3c_hwmon_exit);
408 397
409MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 398MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
410MODULE_DESCRIPTION("S3C ADC HWMon driver"); 399MODULE_DESCRIPTION("S3C ADC HWMon driver");
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index e3b5c6039c25..79b6dabe3161 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -590,19 +590,8 @@ static struct platform_driver sch5627_driver = {
590 .remove = sch5627_remove, 590 .remove = sch5627_remove,
591}; 591};
592 592
593static int __init sch5627_init(void) 593module_platform_driver(sch5627_driver);
594{
595 return platform_driver_register(&sch5627_driver);
596}
597
598static void __exit sch5627_exit(void)
599{
600 platform_driver_unregister(&sch5627_driver);
601}
602 594
603MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver"); 595MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
604MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); 596MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
605MODULE_LICENSE("GPL"); 597MODULE_LICENSE("GPL");
606
607module_init(sch5627_init);
608module_exit(sch5627_exit);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 244407aa79fc..9d5236fb09b4 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -521,19 +521,8 @@ static struct platform_driver sch5636_driver = {
521 .remove = sch5636_remove, 521 .remove = sch5636_remove,
522}; 522};
523 523
524static int __init sch5636_init(void) 524module_platform_driver(sch5636_driver);
525{
526 return platform_driver_register(&sch5636_driver);
527}
528
529static void __exit sch5636_exit(void)
530{
531 platform_driver_unregister(&sch5636_driver);
532}
533 525
534MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver"); 526MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver");
535MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); 527MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
536MODULE_LICENSE("GPL"); 528MODULE_LICENSE("GPL");
537
538module_init(sch5636_init);
539module_exit(sch5636_exit);
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 57240740b161..0018c7dd0097 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -136,19 +136,7 @@ static struct platform_driver twl4030_madc_hwmon_driver = {
136 }, 136 },
137}; 137};
138 138
139static int __init twl4030_madc_hwmon_init(void) 139module_platform_driver(twl4030_madc_hwmon_driver);
140{
141 return platform_driver_register(&twl4030_madc_hwmon_driver);
142}
143
144module_init(twl4030_madc_hwmon_init);
145
146static void __exit twl4030_madc_hwmon_exit(void)
147{
148 platform_driver_unregister(&twl4030_madc_hwmon_driver);
149}
150
151module_exit(twl4030_madc_hwmon_exit);
152 140
153MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver"); 141MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
154MODULE_LICENSE("GPL"); 142MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 3cd07bf42dca..b9a87e89bab4 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -309,15 +309,4 @@ static struct platform_driver env_driver = {
309 .remove = __devexit_p(env_remove), 309 .remove = __devexit_p(env_remove),
310}; 310};
311 311
312static int __init env_init(void) 312module_platform_driver(env_driver);
313{
314 return platform_driver_register(&env_driver);
315}
316
317static void __exit env_exit(void)
318{
319 platform_driver_unregister(&env_driver);
320}
321
322module_init(env_init);
323module_exit(env_exit);
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c
index 97b1f834a471..9b598ed26020 100644
--- a/drivers/hwmon/wm831x-hwmon.c
+++ b/drivers/hwmon/wm831x-hwmon.c
@@ -209,17 +209,7 @@ static struct platform_driver wm831x_hwmon_driver = {
209 }, 209 },
210}; 210};
211 211
212static int __init wm831x_hwmon_init(void) 212module_platform_driver(wm831x_hwmon_driver);
213{
214 return platform_driver_register(&wm831x_hwmon_driver);
215}
216module_init(wm831x_hwmon_init);
217
218static void __exit wm831x_hwmon_exit(void)
219{
220 platform_driver_unregister(&wm831x_hwmon_driver);
221}
222module_exit(wm831x_hwmon_exit);
223 213
224MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); 214MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
225MODULE_DESCRIPTION("WM831x Hardware Monitoring"); 215MODULE_DESCRIPTION("WM831x Hardware Monitoring");
diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c
index 13290595ca86..3ff67edbdc44 100644
--- a/drivers/hwmon/wm8350-hwmon.c
+++ b/drivers/hwmon/wm8350-hwmon.c
@@ -133,17 +133,7 @@ static struct platform_driver wm8350_hwmon_driver = {
133 }, 133 },
134}; 134};
135 135
136static int __init wm8350_hwmon_init(void) 136module_platform_driver(wm8350_hwmon_driver);
137{
138 return platform_driver_register(&wm8350_hwmon_driver);
139}
140module_init(wm8350_hwmon_init);
141
142static void __exit wm8350_hwmon_exit(void)
143{
144 platform_driver_unregister(&wm8350_hwmon_driver);
145}
146module_exit(wm8350_hwmon_exit);
147 137
148MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); 138MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
149MODULE_DESCRIPTION("WM8350 Hardware Monitoring"); 139MODULE_DESCRIPTION("WM8350 Hardware Monitoring");
diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c
index 143461a95ae4..86980fe04117 100644
--- a/drivers/hwspinlock/u8500_hsem.c
+++ b/drivers/hwspinlock/u8500_hsem.c
@@ -21,6 +21,7 @@
  * General Public License for more details.
  */
 
+#include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
@@ -108,10 +109,8 @@ static int __devinit u8500_hsem_probe(struct platform_device *pdev)
 		return -ENODEV;
 
 	io_base = ioremap(res->start, resource_size(res));
-	if (!io_base) {
-		ret = -ENOMEM;
-		goto free_state;
-	}
+	if (!io_base)
+		return -ENOMEM;
 
 	/* make sure protocol 1 is selected */
 	val = readl(io_base + HSEM_CTRL_REG);
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 85584a547c25..525c7345fa0b 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -488,7 +488,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
 
 	if (flags & I2C_M_TEN) {
 		/* a ten bit address */
-		addr = 0xf0 | ((msg->addr >> 7) & 0x03);
+		addr = 0xf0 | ((msg->addr >> 7) & 0x06);
 		bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
 		/* try extended address code...*/
 		ret = try_address(i2c_adap, addr, retries);
@@ -498,7 +498,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
 			return -ENXIO;
 		}
 		/* the remaining 8 bit address */
-		ret = i2c_outb(i2c_adap, msg->addr & 0x7f);
+		ret = i2c_outb(i2c_adap, msg->addr & 0xff);
 		if ((ret != 1) && !nak_ok) {
 			/* the chip did not ack / xmission error occurred */
 			dev_err(&i2c_adap->dev, "died at 2nd address code\n");
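[Editor's note: the two i2c-algo-bit changes correct the 10-bit addressing sequence. On the wire, the first byte is 11110, the two high address bits (9:8) and the R/W bit, and the second byte carries the low eight address bits; the old code masked the wrong bits and truncated the second byte to 7 bits. A small self-contained sketch of the corrected math, using a hypothetical 10-bit address, follows.]

	/* Standalone illustration of the fixed 10-bit address encoding. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int addr = 0x2a5;	/* hypothetical 10-bit slave address */
		unsigned int first  = 0xf0 | ((addr >> 7) & 0x06);	/* 11110 a9 a8 + write bit */
		unsigned int second = addr & 0xff;			/* address bits 7:0 */

		printf("first byte 0x%02x, second byte 0x%02x\n", first, second);
		return 0;	/* prints: first byte 0xf4, second byte 0xa5 */
	}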
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 835e47b39bc2..03b615778887 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
 	i2c->adap.algo_data = i2c;
 	i2c->adap.dev.parent = &pdev->dev;
 
-	mfp_set_groupg(&pdev->dev);
+	mfp_set_groupg(&pdev->dev, NULL);
 
 	clk_get_rate(i2c->clk);
 
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 131079a3e292..1e5606185b4f 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -539,8 +539,10 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
 	client->dev.type = &i2c_client_type;
 	client->dev.of_node = info->of_node;
 
+	/* For 10-bit clients, add an arbitrary offset to avoid collisions */
 	dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
-		     client->addr);
+		     client->addr | ((client->flags & I2C_CLIENT_TEN)
+				     ? 0xa000 : 0));
 	status = device_register(&client->dev);
 	if (status)
 		goto out_err;
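[Editor's note: with the hunk above, a 7-bit client at 0x50 on adapter 0 keeps the name "0-0050", while a 10-bit client at the same address becomes "0-a050", so the two can coexist in sysfs. A minimal user-space sketch of the naming scheme, with assumed example values, follows.]

	/* Illustration only; not kernel code. */
	#include <stdio.h>

	int main(void)
	{
		unsigned short addr = 0x0050;	/* hypothetical client address */
		int adapter = 0, is_ten_bit = 1;	/* assumed values for the example */

		printf("%d-%04x\n", adapter,
		       (unsigned int)(addr | (is_ten_bit ? 0xa000 : 0)));
		return 0;	/* prints "0-a050" */
	}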
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index c90ce50b619f..57a45ce84b2d 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -579,7 +579,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
 	return 0;
 }
 
-int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
+static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
 			 void *data)
 {
 	struct device *dev = data;
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index 67cbcfa35122..847553fd8b96 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer
  * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator
- * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
+ * Copyright (C) 2007-2011 Bartlomiej Zolnierkiewicz
  *
  * CYPRESS CY82C693 chipset IDE controller
  *
@@ -90,7 +90,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
 	u8 time_16, time_8;
 
 	/* select primary or secondary channel */
-	if (hwif->index > 0) {  /* drive is on the secondary channel */
+	if (drive->dn > 1) {  /* drive is on the secondary channel */
 		dev = pci_get_slot(dev->bus, dev->devfn+1);
 		if (!dev) {
 			printk(KERN_ERR "%s: tune_drive: "
@@ -141,7 +141,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
 		pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
 		pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
 	}
-	if (hwif->index > 0)
+	if (drive->dn > 1)
 		pci_dev_put(dev);
 }
 
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 4a697a238e28..8716066a2f2b 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -521,8 +521,8 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
 	if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
 		d.init_dma = icside_dma_init;
 		d.port_ops = &icside_v6_port_ops;
+	} else
 		d.dma_ops = NULL;
-	}
 
 	ret = ide_host_register(host, &d, hws);
 	if (ret)
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 04b09564bfa9..8126824daccb 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -43,7 +43,6 @@
 /* For SCSI -> ATAPI command conversion */
 #include <scsi/scsi.h>
 
-#include <linux/irq.h>
 #include <linux/io.h>
 #include <asm/byteorder.h>
 #include <linux/uaccess.h>
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 61fdf544fbd6..3d42043fec51 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -35,7 +35,6 @@
 #include <scsi/scsi_ioctl.h>
 
 #include <asm/byteorder.h>
-#include <linux/irq.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <asm/unaligned.h>
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 7ecb1ade8874..ce8237d36159 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -41,7 +41,6 @@
 #include <scsi/scsi.h>
 
 #include <asm/byteorder.h>
-#include <linux/irq.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <asm/unaligned.h>
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
index b59d04c72051..1892e81fb00f 100644
--- a/drivers/ide/piix.c
+++ b/drivers/ide/piix.c
@@ -331,7 +331,7 @@ static const struct ide_port_ops ich_port_ops = {
 		.udma_mask	= udma, \
 	}
 
-#define DECLARE_ICH_DEV(udma) \
+#define DECLARE_ICH_DEV(mwdma, udma) \
 	{ \
 		.name		= DRV_NAME, \
 		.init_chipset	= init_chipset_ich, \
@@ -340,7 +340,7 @@ static const struct ide_port_ops ich_port_ops = {
 		.port_ops	= &ich_port_ops, \
 		.pio_mask	= ATA_PIO4, \
 		.swdma_mask	= ATA_SWDMA2_ONLY, \
-		.mwdma_mask	= ATA_MWDMA12_ONLY, \
+		.mwdma_mask	= mwdma, \
 		.udma_mask	= udma, \
 	}
 
@@ -362,13 +362,15 @@ static const struct ide_port_info piix_pci_info[] __devinitdata = {
 	/* 2: PIIX4 */
 	DECLARE_PIIX_DEV(ATA_UDMA2),
 	/* 3: ICH0 */
-	DECLARE_ICH_DEV(ATA_UDMA2),
+	DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA2),
 	/* 4: ICH */
-	DECLARE_ICH_DEV(ATA_UDMA4),
+	DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA4),
 	/* 5: PIIX4 */
 	DECLARE_PIIX_DEV(ATA_UDMA4),
-	/* 6: ICH[2-7]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
-	DECLARE_ICH_DEV(ATA_UDMA5),
+	/* 6: ICH[2-6]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
+	DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA5),
+	/* 7: ICH7/7-R, no MWDMA1 */
+	DECLARE_ICH_DEV(ATA_MWDMA2_ONLY, ATA_UDMA5),
 };
 
 /**
@@ -438,9 +440,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
 #endif
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2),      6 },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19),    6 },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21),    6 },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21),    7 },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1),  6 },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18),    6 },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18),    7 },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6),     6 },
 	{ 0, },
 };
diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
index e53a1b78378b..281c91426345 100644
--- a/drivers/ide/triflex.c
+++ b/drivers/ide/triflex.c
@@ -113,12 +113,26 @@ static const struct pci_device_id triflex_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, triflex_pci_tbl);
 
+#ifdef CONFIG_PM
+static int triflex_ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+	/*
+	 * We must not disable or powerdown the device.
+	 * APM bios refuses to suspend if IDE is not accessible.
+	 */
+	pci_save_state(dev);
+	return 0;
+}
+#else
+#define triflex_ide_pci_suspend NULL
+#endif
+
 static struct pci_driver triflex_pci_driver = {
 	.name		= "TRIFLEX_IDE",
 	.id_table	= triflex_pci_tbl,
 	.probe		= triflex_init_one,
 	.remove		= ide_pci_remove,
-	.suspend	= ide_pci_suspend,
+	.suspend	= triflex_ide_pci_suspend,
 	.resume		= ide_pci_resume,
 };
 
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 691276bafd78..e9cf51b1343b 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -216,7 +216,9 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 
 	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
 	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
+		rcu_read_lock();
 		neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
+		rcu_read_unlock();
 		ret = -ENODATA;
 		if (neigh)
 			goto release;
@@ -274,15 +276,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 		goto put;
 	}
 
+	rcu_read_lock();
 	neigh = dst_get_neighbour(dst);
 	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
 		if (neigh)
 			neigh_event_send(neigh, NULL);
 		ret = -ENODATA;
-		goto put;
+	} else {
+		ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
 	}
-
-	ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+	rcu_read_unlock();
 put:
 	dst_release(dst);
 	return ret;
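[Editor's note: the addr.c, cxgb3/cxgb4, nes and ipoib hunks in this series all apply the same pattern: dst_get_neighbour() now returns an RCU-protected pointer, so every lookup and use of the neighbour is wrapped in an RCU read-side critical section. A minimal sketch of that pattern, with an illustrative function name, is shown below; it is not code from any of these drivers.]

	#include <linux/rcupdate.h>
	#include <net/dst.h>
	#include <net/neighbour.h>

	/* Sketch: dereference an RCU-protected neighbour only under rcu_read_lock(). */
	static int example_use_neighbour(struct dst_entry *dst)
	{
		struct neighbour *n;
		int ret = -ENODATA;

		rcu_read_lock();
		n = dst_get_neighbour(dst);
		if (n && (n->nud_state & NUD_VALID))
			ret = 0;		/* n->ha etc. may be used while the lock is held */
		else if (n)
			neigh_event_send(n, NULL);
		rcu_read_unlock();		/* n must not be dereferenced past this point */

		return ret;
	}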
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index de6d0774e609..c88b12beef25 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1375,8 +1375,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1375 goto reject; 1375 goto reject;
1376 } 1376 }
1377 dst = &rt->dst; 1377 dst = &rt->dst;
1378 rcu_read_lock();
1378 neigh = dst_get_neighbour(dst); 1379 neigh = dst_get_neighbour(dst);
1379 l2t = t3_l2t_get(tdev, neigh, neigh->dev); 1380 l2t = t3_l2t_get(tdev, neigh, neigh->dev);
1381 rcu_read_unlock();
1380 if (!l2t) { 1382 if (!l2t) {
1381 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 1383 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1382 __func__); 1384 __func__);
@@ -1946,10 +1948,12 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1946 } 1948 }
1947 ep->dst = &rt->dst; 1949 ep->dst = &rt->dst;
1948 1950
1951 rcu_read_lock();
1949 neigh = dst_get_neighbour(ep->dst); 1952 neigh = dst_get_neighbour(ep->dst);
1950 1953
1951 /* get a l2t entry */ 1954 /* get a l2t entry */
1952 ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev); 1955 ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
1956 rcu_read_unlock();
1953 if (!ep->l2t) { 1957 if (!ep->l2t) {
1954 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 1958 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1955 err = -ENOMEM; 1959 err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b36cdac9c558..0747004313ad 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -542,8 +542,10 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
542 (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); 542 (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
543 mpa->private_data_size = htons(ep->plen); 543 mpa->private_data_size = htons(ep->plen);
544 mpa->revision = mpa_rev_to_use; 544 mpa->revision = mpa_rev_to_use;
545 if (mpa_rev_to_use == 1) 545 if (mpa_rev_to_use == 1) {
546 ep->tried_with_mpa_v1 = 1; 546 ep->tried_with_mpa_v1 = 1;
547 ep->retry_with_mpa_v1 = 0;
548 }
547 549
548 if (mpa_rev_to_use == 2) { 550 if (mpa_rev_to_use == 2) {
549 mpa->private_data_size += 551 mpa->private_data_size +=
@@ -1594,6 +1596,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1594 goto reject; 1596 goto reject;
1595 } 1597 }
1596 dst = &rt->dst; 1598 dst = &rt->dst;
1599 rcu_read_lock();
1597 neigh = dst_get_neighbour(dst); 1600 neigh = dst_get_neighbour(dst);
1598 if (neigh->dev->flags & IFF_LOOPBACK) { 1601 if (neigh->dev->flags & IFF_LOOPBACK) {
1599 pdev = ip_dev_find(&init_net, peer_ip); 1602 pdev = ip_dev_find(&init_net, peer_ip);
@@ -1620,6 +1623,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1620 rss_qid = dev->rdev.lldi.rxq_ids[ 1623 rss_qid = dev->rdev.lldi.rxq_ids[
1621 cxgb4_port_idx(neigh->dev) * step]; 1624 cxgb4_port_idx(neigh->dev) * step];
1622 } 1625 }
1626 rcu_read_unlock();
1623 if (!l2t) { 1627 if (!l2t) {
1624 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 1628 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1625 __func__); 1629 __func__);
@@ -1820,6 +1824,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
1820 } 1824 }
1821 ep->dst = &rt->dst; 1825 ep->dst = &rt->dst;
1822 1826
1827 rcu_read_lock();
1823 neigh = dst_get_neighbour(ep->dst); 1828 neigh = dst_get_neighbour(ep->dst);
1824 1829
1825 /* get a l2t entry */ 1830 /* get a l2t entry */
@@ -1856,6 +1861,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
1856 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ 1861 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
1857 cxgb4_port_idx(neigh->dev) * step]; 1862 cxgb4_port_idx(neigh->dev) * step];
1858 } 1863 }
1864 rcu_read_unlock();
1859 if (!ep->l2t) { 1865 if (!ep->l2t) {
1860 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 1866 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1861 err = -ENOMEM; 1867 err = -ENOMEM;
@@ -2301,6 +2307,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2301 } 2307 }
2302 ep->dst = &rt->dst; 2308 ep->dst = &rt->dst;
2303 2309
2310 rcu_read_lock();
2304 neigh = dst_get_neighbour(ep->dst); 2311 neigh = dst_get_neighbour(ep->dst);
2305 2312
2306 /* get a l2t entry */ 2313 /* get a l2t entry */
@@ -2339,6 +2346,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2339 ep->retry_with_mpa_v1 = 0; 2346 ep->retry_with_mpa_v1 = 0;
2340 ep->tried_with_mpa_v1 = 0; 2347 ep->tried_with_mpa_v1 = 0;
2341 } 2348 }
2349 rcu_read_unlock();
2342 if (!ep->l2t) { 2350 if (!ep->l2t) {
2343 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 2351 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
2344 err = -ENOMEM; 2352 err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index f35a935267e7..0f1607c8325a 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -311,7 +311,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
311 while (ptr != cq->sw_pidx) { 311 while (ptr != cq->sw_pidx) {
312 cqe = &cq->sw_queue[ptr]; 312 cqe = &cq->sw_queue[ptr];
313 if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) && 313 if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
314 (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq)) 314 (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
315 (*count)++; 315 (*count)++;
316 if (++ptr == cq->size) 316 if (++ptr == cq->size)
317 ptr = 0; 317 ptr = 0;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index dfce9ea98a39..0a52d72371ee 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1377,9 +1377,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1377 neigh_release(neigh); 1377 neigh_release(neigh);
1378 } 1378 }
1379 1379
1380 if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) 1380 if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) {
1381 rcu_read_lock();
1381 neigh_event_send(dst_get_neighbour(&rt->dst), NULL); 1382 neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
1382 1383 rcu_read_unlock();
1384 }
1383 ip_rt_put(rt); 1385 ip_rt_put(rt);
1384 return rc; 1386 return rc;
1385} 1387}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bd2162b95dc..1d5895941e19 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2307,19 +2307,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2307 SYM_LSB(IBCCtrlA_0, MaxPktLen); 2307 SYM_LSB(IBCCtrlA_0, MaxPktLen);
2308 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */ 2308 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2309 2309
2310 /* initially come up waiting for TS1, without sending anything. */
2311 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2312 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2313
2314 ppd->cpspec->ibcctrl_a = val;
2315 /* 2310 /*
2316 * Reset the PCS interface to the serdes (and also ibc, which is still 2311 * Reset the PCS interface to the serdes (and also ibc, which is still
2317 * in reset from above). Writes new value of ibcctrl_a as last step. 2312 * in reset from above). Writes new value of ibcctrl_a as last step.
2318 */ 2313 */
2319 qib_7322_mini_pcs_reset(ppd); 2314 qib_7322_mini_pcs_reset(ppd);
2320 qib_write_kreg(dd, kr_scratch, 0ULL);
2321 /* clear the linkinit cmds */
2322 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2323 2315
2324 if (!ppd->cpspec->ibcctrl_b) { 2316 if (!ppd->cpspec->ibcctrl_b) {
2325 unsigned lse = ppd->link_speed_enabled; 2317 unsigned lse = ppd->link_speed_enabled;
@@ -2385,6 +2377,14 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2385 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn); 2377 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2386 set_vls(ppd); 2378 set_vls(ppd);
2387 2379
2380 /* initially come up DISABLED, without sending anything. */
2381 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2382 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2383 qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2384 qib_write_kreg(dd, kr_scratch, 0ULL);
2385 /* clear the linkinit cmds */
2386 ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2387
2388 /* be paranoid against later code motion, etc. */ 2388 /* be paranoid against later code motion, etc. */
2389 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); 2389 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2390 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable); 2390 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
@@ -5241,7 +5241,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5241 off */ 5241 off */
5242 if (ppd->dd->flags & QIB_HAS_QSFP) { 5242 if (ppd->dd->flags & QIB_HAS_QSFP) {
5243 qd->t_insert = get_jiffies_64(); 5243 qd->t_insert = get_jiffies_64();
5244 schedule_work(&qd->work); 5244 queue_work(ib_wq, &qd->work);
5245 } 5245 }
5246 spin_lock_irqsave(&ppd->sdma_lock, flags); 5246 spin_lock_irqsave(&ppd->sdma_lock, flags);
5247 if (__qib_sdma_running(ppd)) 5247 if (__qib_sdma_running(ppd))
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index e06c4ed383f1..fa71b1e666c5 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -480,18 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
480 udelay(20); /* Generous RST dwell */ 480 udelay(20); /* Generous RST dwell */
481 481
482 dd->f_gpio_mod(dd, mask, mask, mask); 482 dd->f_gpio_mod(dd, mask, mask, mask);
483 /* Spec says module can take up to two seconds! */
484 mask = QSFP_GPIO_MOD_PRS_N;
485 if (qd->ppd->hw_pidx)
486 mask <<= QSFP_GPIO_PORT2_SHIFT;
487
488 /* Do not try to wait here. Better to let event handle it */
489 if (!qib_qsfp_mod_present(qd->ppd))
490 goto bail;
491 /* We see a module, but it may be unwise to look yet. Just schedule */
492 qd->t_insert = get_jiffies_64();
493 queue_work(ib_wq, &qd->work);
494bail:
495 return; 483 return;
496} 484}
497 485
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0ef9af94997d..4115be54ba3b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -57,21 +57,24 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
 			       struct ib_pd *pd, struct ib_ah_attr *attr)
 {
 	struct ipoib_ah *ah;
+	struct ib_ah *vah;
 
 	ah = kmalloc(sizeof *ah, GFP_KERNEL);
 	if (!ah)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	ah->dev       = dev;
 	ah->last_send = 0;
 	kref_init(&ah->ref);
 
-	ah->ah = ib_create_ah(pd, attr);
-	if (IS_ERR(ah->ah)) {
+	vah = ib_create_ah(pd, attr);
+	if (IS_ERR(vah)) {
 		kfree(ah);
-		ah = NULL;
-	} else
+		ah = (struct ipoib_ah *)vah;
+	} else {
+		ah->ah = vah;
 		ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
+	}
 
 	return ah;
 }
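[Editor's note: after this change ipoib_create_ah() reports failure with ERR_PTR() instead of NULL, so callers must test the result with IS_ERR()/IS_ERR_OR_NULL(), as the ipoib_main.c and ipoib_multicast.c hunks below do. A hedged sketch of the expected caller-side handling, with an illustrative function name, follows.]

	/* Sketch only; not the driver's actual code. */
	static int example_make_ah(struct net_device *dev,
				   struct ipoib_dev_priv *priv,
				   struct ib_ah_attr *av)
	{
		struct ipoib_ah *ah = ipoib_create_ah(dev, priv->pd, av);

		if (IS_ERR(ah))
			return PTR_ERR(ah);	/* -ENOMEM or the ib_create_ah() error */

		ipoib_dbg(priv, "got ah %p\n", ah->ah);
		return 0;
	}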
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7567b6000230..83695b48b010 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -432,7 +432,7 @@ static void path_rec_completion(int status,
432 432
433 spin_lock_irqsave(&priv->lock, flags); 433 spin_lock_irqsave(&priv->lock, flags);
434 434
435 if (ah) { 435 if (!IS_ERR_OR_NULL(ah)) {
436 path->pathrec = *pathrec; 436 path->pathrec = *pathrec;
437 437
438 old_ah = path->ah; 438 old_ah = path->ah;
@@ -555,6 +555,7 @@ static int path_rec_start(struct net_device *dev,
555 return 0; 555 return 0;
556} 556}
557 557
558/* called with rcu_read_lock */
558static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) 559static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
559{ 560{
560 struct ipoib_dev_priv *priv = netdev_priv(dev); 561 struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -636,6 +637,7 @@ err_drop:
636 spin_unlock_irqrestore(&priv->lock, flags); 637 spin_unlock_irqrestore(&priv->lock, flags);
637} 638}
638 639
640/* called with rcu_read_lock */
639static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) 641static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
640{ 642{
641 struct ipoib_dev_priv *priv = netdev_priv(skb->dev); 643 struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
@@ -720,13 +722,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
720 struct neighbour *n = NULL; 722 struct neighbour *n = NULL;
721 unsigned long flags; 723 unsigned long flags;
722 724
725 rcu_read_lock();
723 if (likely(skb_dst(skb))) 726 if (likely(skb_dst(skb)))
724 n = dst_get_neighbour(skb_dst(skb)); 727 n = dst_get_neighbour(skb_dst(skb));
725 728
726 if (likely(n)) { 729 if (likely(n)) {
727 if (unlikely(!*to_ipoib_neigh(n))) { 730 if (unlikely(!*to_ipoib_neigh(n))) {
728 ipoib_path_lookup(skb, dev); 731 ipoib_path_lookup(skb, dev);
729 return NETDEV_TX_OK; 732 goto unlock;
730 } 733 }
731 734
732 neigh = *to_ipoib_neigh(n); 735 neigh = *to_ipoib_neigh(n);
@@ -749,17 +752,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
749 ipoib_neigh_free(dev, neigh); 752 ipoib_neigh_free(dev, neigh);
750 spin_unlock_irqrestore(&priv->lock, flags); 753 spin_unlock_irqrestore(&priv->lock, flags);
751 ipoib_path_lookup(skb, dev); 754 ipoib_path_lookup(skb, dev);
752 return NETDEV_TX_OK; 755 goto unlock;
753 } 756 }
754 757
755 if (ipoib_cm_get(neigh)) { 758 if (ipoib_cm_get(neigh)) {
756 if (ipoib_cm_up(neigh)) { 759 if (ipoib_cm_up(neigh)) {
757 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); 760 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
758 return NETDEV_TX_OK; 761 goto unlock;
759 } 762 }
760 } else if (neigh->ah) { 763 } else if (neigh->ah) {
761 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha)); 764 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
762 return NETDEV_TX_OK; 765 goto unlock;
763 } 766 }
764 767
765 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 768 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
@@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
793 phdr->hwaddr + 4); 796 phdr->hwaddr + 4);
794 dev_kfree_skb_any(skb); 797 dev_kfree_skb_any(skb);
795 ++dev->stats.tx_dropped; 798 ++dev->stats.tx_dropped;
796 return NETDEV_TX_OK; 799 goto unlock;
797 } 800 }
798 801
799 unicast_arp_send(skb, dev, phdr); 802 unicast_arp_send(skb, dev, phdr);
800 } 803 }
801 } 804 }
802 805unlock:
806 rcu_read_unlock();
803 return NETDEV_TX_OK; 807 return NETDEV_TX_OK;
804} 808}
805 809
@@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
837 dst = skb_dst(skb); 841 dst = skb_dst(skb);
838 n = NULL; 842 n = NULL;
839 if (dst) 843 if (dst)
840 n = dst_get_neighbour(dst); 844 n = dst_get_neighbour_raw(dst);
841 if ((!dst || !n) && daddr) { 845 if ((!dst || !n) && daddr) {
842 struct ipoib_pseudoheader *phdr = 846 struct ipoib_pseudoheader *phdr =
843 (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); 847 (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1b7a97686356..873bff97e69e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -240,8 +240,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
240 av.grh.dgid = mcast->mcmember.mgid; 240 av.grh.dgid = mcast->mcmember.mgid;
241 241
242 ah = ipoib_create_ah(dev, priv->pd, &av); 242 ah = ipoib_create_ah(dev, priv->pd, &av);
243 if (!ah) { 243 if (IS_ERR(ah)) {
244 ipoib_warn(priv, "ib_address_create failed\n"); 244 ipoib_warn(priv, "ib_address_create failed %ld\n",
245 -PTR_ERR(ah));
246 /* use original error */
247 return PTR_ERR(ah);
245 } else { 248 } else {
246 spin_lock_irq(&priv->lock); 249 spin_lock_irq(&priv->lock);
247 mcast->ah = ah; 250 mcast->ah = ah;
@@ -266,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
266 269
267 skb->dev = dev; 270 skb->dev = dev;
268 if (dst) 271 if (dst)
269 n = dst_get_neighbour(dst); 272 n = dst_get_neighbour_raw(dst);
270 if (!dst || !n) { 273 if (!dst || !n) {
271 /* put pseudoheader back on for next time */ 274 /* put pseudoheader back on for next time */
272 skb_push(skb, sizeof (struct ipoib_pseudoheader)); 275 skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -722,6 +725,8 @@ out:
722 if (mcast && mcast->ah) { 725 if (mcast && mcast->ah) {
723 struct dst_entry *dst = skb_dst(skb); 726 struct dst_entry *dst = skb_dst(skb);
724 struct neighbour *n = NULL; 727 struct neighbour *n = NULL;
728
729 rcu_read_lock();
725 if (dst) 730 if (dst)
726 n = dst_get_neighbour(dst); 731 n = dst_get_neighbour(dst);
727 if (n && !*to_ipoib_neigh(n)) { 732 if (n && !*to_ipoib_neigh(n)) {
@@ -734,7 +739,7 @@ out:
734 list_add_tail(&neigh->list, &mcast->neigh_list); 739 list_add_tail(&neigh->list, &mcast->neigh_list);
735 } 740 }
736 } 741 }
737 742 rcu_read_unlock();
738 spin_unlock_irqrestore(&priv->lock, flags); 743 spin_unlock_irqrestore(&priv->lock, flags);
739 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); 744 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
740 return; 745 return;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 09b93b11a274..e2a9867c19d5 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1210,18 +1210,28 @@ static int elantech_reconnect(struct psmouse *psmouse)
1210 */ 1210 */
1211static int elantech_set_properties(struct elantech_data *etd) 1211static int elantech_set_properties(struct elantech_data *etd)
1212{ 1212{
1213 /* This represents the version of IC body. */
1213 int ver = (etd->fw_version & 0x0f0000) >> 16; 1214 int ver = (etd->fw_version & 0x0f0000) >> 16;
1214 1215
1216 /* Early version of Elan touchpads doesn't obey the rule. */
1215 if (etd->fw_version < 0x020030 || etd->fw_version == 0x020600) 1217 if (etd->fw_version < 0x020030 || etd->fw_version == 0x020600)
1216 etd->hw_version = 1; 1218 etd->hw_version = 1;
1217 else if (etd->fw_version < 0x150600) 1219 else {
1218 etd->hw_version = 2; 1220 switch (ver) {
1219 else if (ver == 5) 1221 case 2:
1220 etd->hw_version = 3; 1222 case 4:
1221 else if (ver == 6) 1223 etd->hw_version = 2;
1222 etd->hw_version = 4; 1224 break;
1223 else 1225 case 5:
1224 return -1; 1226 etd->hw_version = 3;
1227 break;
1228 case 6:
1229 etd->hw_version = 4;
1230 break;
1231 default:
1232 return -1;
1233 }
1234 }
1225 1235
1226 /* 1236 /*
1227 * Turn on packet checking by default. 1237 * Turn on packet checking by default.
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index 4b2a42f9f0bb..d4d08bd9205b 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -24,6 +24,7 @@
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/serio.h> 25#include <linux/serio.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/module.h>
27 28
28#include <asm/mach-types.h> 29#include <asm/mach-types.h>
29#include <plat/board-ams-delta.h> 30#include <plat/board-ams-delta.h>
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index bb9f5d31f0d0..b4cfc6c8be89 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -431,6 +431,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
431 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), 431 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
432 }, 432 },
433 }, 433 },
434 {
435 /* Newer HP Pavilion dv4 models */
436 .matches = {
437 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
438 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
439 },
440 },
434 { } 441 { }
435}; 442};
436 443
@@ -560,6 +567,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
560 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), 567 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
561 }, 568 },
562 }, 569 },
570 {
571 /* Newer HP Pavilion dv4 models */
572 .matches = {
573 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
574 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
575 },
576 },
563 { } 577 { }
564}; 578};
565 579
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c0c7820d4c46..bdc447fd4766 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -405,6 +405,9 @@ int dmar_disabled = 0;
405int dmar_disabled = 1; 405int dmar_disabled = 1;
406#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/ 406#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
407 407
408int intel_iommu_enabled = 0;
409EXPORT_SYMBOL_GPL(intel_iommu_enabled);
410
408static int dmar_map_gfx = 1; 411static int dmar_map_gfx = 1;
409static int dmar_forcedac; 412static int dmar_forcedac;
410static int intel_iommu_strict; 413static int intel_iommu_strict;
@@ -3524,7 +3527,7 @@ found:
3524 return 0; 3527 return 0;
3525} 3528}
3526 3529
3527int dmar_parse_rmrr_atsr_dev(void) 3530int __init dmar_parse_rmrr_atsr_dev(void)
3528{ 3531{
3529 struct dmar_rmrr_unit *rmrr, *rmrr_n; 3532 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3530 struct dmar_atsr_unit *atsr, *atsr_n; 3533 struct dmar_atsr_unit *atsr, *atsr_n;
@@ -3647,6 +3650,8 @@ int __init intel_iommu_init(void)
3647 3650
3648 bus_register_notifier(&pci_bus_type, &device_nb); 3651 bus_register_notifier(&pci_bus_type, &device_nb);
3649 3652
3653 intel_iommu_enabled = 1;
3654
3650 return 0; 3655 return 0;
3651} 3656}
3652 3657
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 07c9f189f314..6777ca049471 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void)
773 return ir_supported; 773 return ir_supported;
774} 774}
775 775
776int ir_dev_scope_init(void) 776int __init ir_dev_scope_init(void)
777{ 777{
778 if (!intr_remapping_enabled) 778 if (!intr_remapping_enabled)
779 return 0; 779 return 0;
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 9c192e79f806..288da5c1499d 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/err.h> 14#include <linux/err.h>
14#include <linux/clk.h> 15#include <linux/clk.h>
15#include <linux/io.h> 16#include <linux/io.h>
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index e8fdb8830f69..46be456fcc00 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/err.h> 14#include <linux/err.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 33ec9e467772..9021182c4b76 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
 	case IIOCDOCFINT:
 		if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
 			return (-EINVAL); /* invalid driver */
+		if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
+				sizeof(dioctl.cf_ctrl.msn))
+			return -EINVAL;
+		if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
+				sizeof(dioctl.cf_ctrl.fwd_nr))
+			return -EINVAL;
 		if ((i = cf_command(dioctl.cf_ctrl.drvid,
 				    (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
 				    dioctl.cf_ctrl.cfproc,
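[Editor's note: this ISDN hunk and the isdn_net.c hunk below share one idea: a user-supplied fixed-size char array is rejected unless strnlen() finds a NUL terminator inside the buffer, i.e. before the string is handed to strcpy(). strnlen() returning exactly the buffer size means no terminator was found. A tiny self-contained sketch of the idiom, with hypothetical names and a 32-byte buffer, follows.]

	#include <string.h>

	/* Sketch of the bounds check applied before strcpy(). */
	static int copy_checked(char *dst, const char src[32])
	{
		if (strnlen(src, 32) == 32)	/* no '\0' inside the buffer */
			return -1;
		strcpy(dst, src);		/* safe: src is a proper C string */
		return 0;
	}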
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f73d7f7e024..2339d7396b9e 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
2756 char *c, 2756 char *c,
2757 *e; 2757 *e;
2758 2758
2759 if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
2760 sizeof(cfg->drvid))
2761 return -EINVAL;
2759 drvidx = -1; 2762 drvidx = -1;
2760 chidx = -1; 2763 chidx = -1;
2761 strcpy(drvid, cfg->drvid); 2764 strcpy(drvid, cfg->drvid);
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 661b692573e7..6d5628bb0601 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -270,11 +270,8 @@ void led_blink_set(struct led_classdev *led_cdev,
270 del_timer_sync(&led_cdev->blink_timer); 270 del_timer_sync(&led_cdev->blink_timer);
271 271
272 if (led_cdev->blink_set && 272 if (led_cdev->blink_set &&
273 !led_cdev->blink_set(led_cdev, delay_on, delay_off)) { 273 !led_cdev->blink_set(led_cdev, delay_on, delay_off))
274 led_cdev->blink_delay_on = *delay_on;
275 led_cdev->blink_delay_off = *delay_off;
276 return; 274 return;
277 }
278 275
279 /* blink with 1 Hz as default if nothing specified */ 276 /* blink with 1 Hz as default if nothing specified */
280 if (!*delay_on && !*delay_off) 277 if (!*delay_on && !*delay_off)
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index 817f37a875c9..c9570fcf1cce 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -159,7 +159,7 @@ int macii_init(void)
159 err = macii_init_via(); 159 err = macii_init_via();
160 if (err) goto out; 160 if (err) goto out;
161 161
162 err = request_irq(IRQ_MAC_ADB, macii_interrupt, IRQ_FLG_LOCK, "ADB", 162 err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
163 macii_interrupt); 163 macii_interrupt);
164 if (err) goto out; 164 if (err) goto out;
165 165
diff --git a/drivers/macintosh/via-maciisi.c b/drivers/macintosh/via-maciisi.c
index 9ab5b0c34f0d..34d02a91b29f 100644
--- a/drivers/macintosh/via-maciisi.c
+++ b/drivers/macintosh/via-maciisi.c
@@ -122,8 +122,8 @@ maciisi_init(void)
122 return err; 122 return err;
123 } 123 }
124 124
125 if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, IRQ_FLG_LOCK | IRQ_FLG_FAST, 125 if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, 0, "ADB",
126 "ADB", maciisi_interrupt)) { 126 maciisi_interrupt)) {
127 printk(KERN_ERR "maciisi_init: can't get irq %d\n", IRQ_MAC_ADB); 127 printk(KERN_ERR "maciisi_init: can't get irq %d\n", IRQ_MAC_ADB);
128 return -EAGAIN; 128 return -EAGAIN;
129 } 129 }
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7878712721bf..b6907118283a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
1106 */ 1106 */
1107 int i; 1107 int i;
1108 1108
1109 spin_lock_irq(&bitmap->lock);
1109 for (i = 0; i < bitmap->file_pages; i++) 1110 for (i = 0; i < bitmap->file_pages; i++)
1110 set_page_attr(bitmap, bitmap->filemap[i], 1111 set_page_attr(bitmap, bitmap->filemap[i],
1111 BITMAP_PAGE_NEEDWRITE); 1112 BITMAP_PAGE_NEEDWRITE);
1112 bitmap->allclean = 0; 1113 bitmap->allclean = 0;
1114 spin_unlock_irq(&bitmap->lock);
1113} 1115}
1114 1116
1115static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc) 1117static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1605,7 +1607,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
1605 for (chunk = s; chunk <= e; chunk++) { 1607 for (chunk = s; chunk <= e; chunk++) {
1606 sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap); 1608 sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
1607 bitmap_set_memory_bits(bitmap, sec, 1); 1609 bitmap_set_memory_bits(bitmap, sec, 1);
1610 spin_lock_irq(&bitmap->lock);
1608 bitmap_file_set_bit(bitmap, sec); 1611 bitmap_file_set_bit(bitmap, sec);
1612 spin_unlock_irq(&bitmap->lock);
1609 if (sec < bitmap->mddev->recovery_cp) 1613 if (sec < bitmap->mddev->recovery_cp)
1610 /* We are asserting that the array is dirty, 1614 /* We are asserting that the array is dirty,
1611 * so move the recovery_cp address back so 1615 * so move the recovery_cp address back so
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 84acfe7d10e4..ee981737edfc 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev)
570 mddev->ctime == 0 && !mddev->hold_active) { 570 mddev->ctime == 0 && !mddev->hold_active) {
571 /* Array is not configured at all, and not held active, 571 /* Array is not configured at all, and not held active,
572 * so destroy it */ 572 * so destroy it */
573 list_del(&mddev->all_mddevs); 573 list_del_init(&mddev->all_mddevs);
574 bs = mddev->bio_set; 574 bs = mddev->bio_set;
575 mddev->bio_set = NULL; 575 mddev->bio_set = NULL;
576 if (mddev->gendisk) { 576 if (mddev->gendisk) {
@@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page)
2546 sep = ","; 2546 sep = ",";
2547 } 2547 }
2548 if (test_bit(Blocked, &rdev->flags) || 2548 if (test_bit(Blocked, &rdev->flags) ||
2549 rdev->badblocks.unacked_exist) { 2549 (rdev->badblocks.unacked_exist
2550 && !test_bit(Faulty, &rdev->flags))) {
2550 len += sprintf(page+len, "%sblocked", sep); 2551 len += sprintf(page+len, "%sblocked", sep);
2551 sep = ","; 2552 sep = ",";
2552 } 2553 }
@@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
3788 if (err) 3789 if (err)
3789 return err; 3790 return err;
3790 else { 3791 else {
3792 if (mddev->hold_active == UNTIL_IOCTL)
3793 mddev->hold_active = 0;
3791 sysfs_notify_dirent_safe(mddev->sysfs_state); 3794 sysfs_notify_dirent_safe(mddev->sysfs_state);
3792 return len; 3795 return len;
3793 } 3796 }
@@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4487 4490
4488 if (!entry->show) 4491 if (!entry->show)
4489 return -EIO; 4492 return -EIO;
4493 spin_lock(&all_mddevs_lock);
4494 if (list_empty(&mddev->all_mddevs)) {
4495 spin_unlock(&all_mddevs_lock);
4496 return -EBUSY;
4497 }
4498 mddev_get(mddev);
4499 spin_unlock(&all_mddevs_lock);
4500
4490 rv = mddev_lock(mddev); 4501 rv = mddev_lock(mddev);
4491 if (!rv) { 4502 if (!rv) {
4492 rv = entry->show(mddev, page); 4503 rv = entry->show(mddev, page);
4493 mddev_unlock(mddev); 4504 mddev_unlock(mddev);
4494 } 4505 }
4506 mddev_put(mddev);
4495 return rv; 4507 return rv;
4496} 4508}
4497 4509
@@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
4507 return -EIO; 4519 return -EIO;
4508 if (!capable(CAP_SYS_ADMIN)) 4520 if (!capable(CAP_SYS_ADMIN))
4509 return -EACCES; 4521 return -EACCES;
4522 spin_lock(&all_mddevs_lock);
4523 if (list_empty(&mddev->all_mddevs)) {
4524 spin_unlock(&all_mddevs_lock);
4525 return -EBUSY;
4526 }
4527 mddev_get(mddev);
4528 spin_unlock(&all_mddevs_lock);
4510 rv = mddev_lock(mddev); 4529 rv = mddev_lock(mddev);
4511 if (mddev->hold_active == UNTIL_IOCTL)
4512 mddev->hold_active = 0;
4513 if (!rv) { 4530 if (!rv) {
4514 rv = entry->store(mddev, page, length); 4531 rv = entry->store(mddev, page, length);
4515 mddev_unlock(mddev); 4532 mddev_unlock(mddev);
4516 } 4533 }
4534 mddev_put(mddev);
4517 return rv; 4535 return rv;
4518} 4536}
4519 4537
@@ -7840,6 +7858,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
7840 s + rdev->data_offset, sectors, acknowledged); 7858 s + rdev->data_offset, sectors, acknowledged);
7841 if (rv) { 7859 if (rv) {
7842 /* Make sure they get written out promptly */ 7860 /* Make sure they get written out promptly */
7861 sysfs_notify_dirent_safe(rdev->sysfs_state);
7843 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags); 7862 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
7844 md_wakeup_thread(rdev->mddev->thread); 7863 md_wakeup_thread(rdev->mddev->thread);
7845 } 7864 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 472aedfb07cf..31670f8d6b65 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3036 if (dev->written) 3036 if (dev->written)
3037 s->written++; 3037 s->written++;
3038 rdev = rcu_dereference(conf->disks[i].rdev); 3038 rdev = rcu_dereference(conf->disks[i].rdev);
3039 if (rdev && test_bit(Faulty, &rdev->flags))
3040 rdev = NULL;
3039 if (rdev) { 3041 if (rdev) {
3040 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 3042 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
3041 &first_bad, &bad_sectors); 3043 &first_bad, &bad_sectors);
@@ -3063,12 +3065,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3063 } 3065 }
3064 } else if (test_bit(In_sync, &rdev->flags)) 3066 } else if (test_bit(In_sync, &rdev->flags))
3065 set_bit(R5_Insync, &dev->flags); 3067 set_bit(R5_Insync, &dev->flags);
3066 else if (!test_bit(Faulty, &rdev->flags)) { 3068 else {
3067 /* in sync if before recovery_offset */ 3069 /* in sync if before recovery_offset */
3068 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 3070 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
3069 set_bit(R5_Insync, &dev->flags); 3071 set_bit(R5_Insync, &dev->flags);
3070 } 3072 }
3071 if (test_bit(R5_WriteError, &dev->flags)) { 3073 if (rdev && test_bit(R5_WriteError, &dev->flags)) {
3072 clear_bit(R5_Insync, &dev->flags); 3074 clear_bit(R5_Insync, &dev->flags);
3073 if (!test_bit(Faulty, &rdev->flags)) { 3075 if (!test_bit(Faulty, &rdev->flags)) {
3074 s->handle_bad_blocks = 1; 3076 s->handle_bad_blocks = 1;
@@ -3076,7 +3078,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3076 } else 3078 } else
3077 clear_bit(R5_WriteError, &dev->flags); 3079 clear_bit(R5_WriteError, &dev->flags);
3078 } 3080 }
3079 if (test_bit(R5_MadeGood, &dev->flags)) { 3081 if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
3080 if (!test_bit(Faulty, &rdev->flags)) { 3082 if (!test_bit(Faulty, &rdev->flags)) {
3081 s->handle_bad_blocks = 1; 3083 s->handle_bad_blocks = 1;
3082 atomic_inc(&rdev->nr_pending); 3084 atomic_inc(&rdev->nr_pending);
@@ -3110,7 +3112,7 @@ static void handle_stripe(struct stripe_head *sh)
3110 struct r5dev *pdev, *qdev; 3112 struct r5dev *pdev, *qdev;
3111 3113
3112 clear_bit(STRIPE_HANDLE, &sh->state); 3114 clear_bit(STRIPE_HANDLE, &sh->state);
3113 if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) { 3115 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
3114 /* already being handled, ensure it gets handled 3116 /* already being handled, ensure it gets handled
3115 * again when current action finishes */ 3117 * again when current action finishes */
3116 set_bit(STRIPE_HANDLE, &sh->state); 3118 set_bit(STRIPE_HANDLE, &sh->state);
@@ -3159,10 +3161,14 @@ static void handle_stripe(struct stripe_head *sh)
3159 /* check if the array has lost more than max_degraded devices and, 3161 /* check if the array has lost more than max_degraded devices and,
3160 * if so, some requests might need to be failed. 3162 * if so, some requests might need to be failed.
3161 */ 3163 */
3162 if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written) 3164 if (s.failed > conf->max_degraded) {
3163 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); 3165 sh->check_state = 0;
3164 if (s.failed > conf->max_degraded && s.syncing) 3166 sh->reconstruct_state = 0;
3165 handle_failed_sync(conf, sh, &s); 3167 if (s.to_read+s.to_write+s.written)
3168 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3169 if (s.syncing)
3170 handle_failed_sync(conf, sh, &s);
3171 }
3166 3172
3167 /* 3173 /*
3168 * might be able to return some write requests if the parity blocks 3174 * might be able to return some write requests if the parity blocks
@@ -3371,7 +3377,7 @@ finish:
3371 3377
3372 return_io(s.return_bi); 3378 return_io(s.return_bi);
3373 3379
3374 clear_bit(STRIPE_ACTIVE, &sh->state); 3380 clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
3375} 3381}
3376 3382
3377static void raid5_activate_delayed(struct r5conf *conf) 3383static void raid5_activate_delayed(struct r5conf *conf)
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
index 2e8c288258a9..34434557ef65 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
@@ -398,7 +398,6 @@ static int mxl111sf_i2c_readagain(struct mxl111sf_state *state,
398 u8 i2c_r_data[24]; 398 u8 i2c_r_data[24];
399 u8 i = 0; 399 u8 i = 0;
400 u8 fifo_status = 0; 400 u8 fifo_status = 0;
401 int ret;
402 int status = 0; 401 int status = 0;
403 402
404 mxl_i2c("read %d bytes", count); 403 mxl_i2c("read %d bytes", count);
@@ -418,7 +417,7 @@ static int mxl111sf_i2c_readagain(struct mxl111sf_state *state,
418 i2c_w_data[4+(i*3)] = 0x00; 417 i2c_w_data[4+(i*3)] = 0x00;
419 } 418 }
420 419
421 ret = mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data); 420 mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data);
422 421
423 /* Check for I2C NACK status */ 422 /* Check for I2C NACK status */
424 if (mxl111sf_i2c_check_status(state) == 1) { 423 if (mxl111sf_i2c_check_status(state) == 1) {
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-phy.c b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
index 91dc1fc2825b..b741b3a7a325 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
@@ -296,8 +296,7 @@ int mxl111sf_config_spi(struct mxl111sf_state *state, int onoff)
296 goto fail; 296 goto fail;
297 297
298 ret = mxl111sf_write_reg(state, 0x00, 0x00); 298 ret = mxl111sf_write_reg(state, 0x00, 0x00);
299 if (mxl_fail(ret)) 299 mxl_fail(ret);
300 goto fail;
301fail: 300fail:
302 return ret; 301 return ret;
303} 302}
@@ -328,11 +327,13 @@ int mxl111sf_idac_config(struct mxl111sf_state *state,
328 /* set hysteresis value reg: 0x0B<5:0> */ 327 /* set hysteresis value reg: 0x0B<5:0> */
329 ret = mxl111sf_write_reg(state, V6_IDAC_HYSTERESIS_REG, 328 ret = mxl111sf_write_reg(state, V6_IDAC_HYSTERESIS_REG,
330 (hysteresis_value & 0x3F)); 329 (hysteresis_value & 0x3F));
330 mxl_fail(ret);
331 } 331 }
332 332
333 ret = mxl111sf_write_reg(state, V6_IDAC_SETTINGS_REG, val); 333 ret = mxl111sf_write_reg(state, V6_IDAC_SETTINGS_REG, val);
334 mxl_fail(ret);
334 335
335 return val; 336 return ret;
336} 337}
337 338
338/* 339/*
diff --git a/drivers/media/video/s5k6aa.c b/drivers/media/video/s5k6aa.c
index 2446736b7871..0df7f2a41814 100644
--- a/drivers/media/video/s5k6aa.c
+++ b/drivers/media/video/s5k6aa.c
@@ -19,6 +19,7 @@
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include <linux/media.h> 21#include <linux/media.h>
22#include <linux/module.h>
22#include <linux/regulator/consumer.h> 23#include <linux/regulator/consumer.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24 25
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index 725634d9736d..844a4d7797bc 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -220,8 +220,8 @@ static int vidioc_querycap(struct file *file, void *priv,
220 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); 220 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
221 cap->bus_info[0] = 0; 221 cap->bus_info[0] = 0;
222 cap->version = KERNEL_VERSION(1, 0, 0); 222 cap->version = KERNEL_VERSION(1, 0, 0);
223 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT 223 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
224 | V4L2_CAP_STREAMING; 224 V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING;
225 return 0; 225 return 0;
226} 226}
227 227
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index ecef127dbc66..1e8cdb77d4b8 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -785,8 +785,8 @@ static int vidioc_querycap(struct file *file, void *priv,
785 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); 785 strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
786 cap->bus_info[0] = 0; 786 cap->bus_info[0] = 0;
787 cap->version = KERNEL_VERSION(1, 0, 0); 787 cap->version = KERNEL_VERSION(1, 0, 0);
788 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE 788 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE
789 | V4L2_CAP_VIDEO_OUTPUT 789 | V4L2_CAP_VIDEO_OUTPUT_MPLANE
790 | V4L2_CAP_STREAMING; 790 | V4L2_CAP_STREAMING;
791 return 0; 791 return 0;
792} 792}
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 10c2364f3e8a..254d32688843 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -1016,7 +1016,8 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
1016 1016
1017 menu_info = &mapping->menu_info[query_menu->index]; 1017 menu_info = &mapping->menu_info[query_menu->index];
1018 1018
1019 if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) { 1019 if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
1020 (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
1020 s32 bitmap; 1021 s32 bitmap;
1021 1022
1022 if (!ctrl->cached) { 1023 if (!ctrl->cached) {
@@ -1225,7 +1226,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
1225 /* Valid menu indices are reported by the GET_RES request for 1226 /* Valid menu indices are reported by the GET_RES request for
1226 * UVC controls that support it. 1227 * UVC controls that support it.
1227 */ 1228 */
1228 if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) { 1229 if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
1230 (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
1229 if (!ctrl->cached) { 1231 if (!ctrl->cached) {
1230 ret = uvc_ctrl_populate_cache(chain, ctrl); 1232 ret = uvc_ctrl_populate_cache(chain, ctrl);
1231 if (ret < 0) 1233 if (ret < 0)
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index f17f92b86a30..0f415dade05a 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -821,8 +821,8 @@ static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
821 fill_event(&ev, ctrl, changes); 821 fill_event(&ev, ctrl, changes);
822 822
823 list_for_each_entry(sev, &ctrl->ev_subs, node) 823 list_for_each_entry(sev, &ctrl->ev_subs, node)
824 if (sev->fh && (sev->fh != fh || 824 if (sev->fh != fh ||
825 (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))) 825 (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))
826 v4l2_event_queue_fh(sev->fh, &ev); 826 v4l2_event_queue_fh(sev->fh, &ev);
827} 827}
828 828
@@ -947,6 +947,7 @@ static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
947 if (ctrl->cluster[0]->has_volatiles) 947 if (ctrl->cluster[0]->has_volatiles)
948 ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; 948 ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
949 } 949 }
950 fh = NULL;
950 } 951 }
951 if (changed || update_inactive) { 952 if (changed || update_inactive) {
952 /* If a control was changed that was not one of the controls 953 /* If a control was changed that was not one of the controls
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
index 46037f225529..c26ad9637143 100644
--- a/drivers/media/video/v4l2-event.c
+++ b/drivers/media/video/v4l2-event.c
@@ -216,6 +216,9 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
216 unsigned long flags; 216 unsigned long flags;
217 unsigned i; 217 unsigned i;
218 218
219 if (sub->type == V4L2_EVENT_ALL)
220 return -EINVAL;
221
219 if (elems < 1) 222 if (elems < 1)
220 elems = 1; 223 elems = 1;
221 if (sub->type == V4L2_EVENT_CTRL) { 224 if (sub->type == V4L2_EVENT_CTRL) {
@@ -283,6 +286,7 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
283{ 286{
284 struct v4l2_subscribed_event *sev; 287 struct v4l2_subscribed_event *sev;
285 unsigned long flags; 288 unsigned long flags;
289 int i;
286 290
287 if (sub->type == V4L2_EVENT_ALL) { 291 if (sub->type == V4L2_EVENT_ALL) {
288 v4l2_event_unsubscribe_all(fh); 292 v4l2_event_unsubscribe_all(fh);
@@ -293,8 +297,12 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
293 297
294 sev = v4l2_event_subscribed(fh, sub->type, sub->id); 298 sev = v4l2_event_subscribed(fh, sub->type, sub->id);
295 if (sev != NULL) { 299 if (sev != NULL) {
300 /* Remove any pending events for this subscription */
301 for (i = 0; i < sev->in_use; i++) {
302 list_del(&sev->events[sev_pos(sev, i)].list);
303 fh->navailable--;
304 }
296 list_del(&sev->list); 305 list_del(&sev->list);
297 sev->fh = NULL;
298 } 306 }
299 307
300 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 308 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
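
The v4l2-event changes above reject V4L2_EVENT_ALL for subscription (it only makes sense for unsubscribe) and drop any queued events when a subscription is removed. From user space the calls look roughly like this (a sketch; fd is an already-open V4L2 device and error handling is trimmed):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int watch_then_drop(int fd, unsigned int ctrl_id)
{
	struct v4l2_event_subscription sub;

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_CTRL;		/* per-control change events */
	sub.id = ctrl_id;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;

	/* Subscribing with sub.type = V4L2_EVENT_ALL now fails with EINVAL;
	 * as before, it is still accepted here to unsubscribe everything. */
	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_ALL;
	return ioctl(fd, VIDIOC_UNSUBSCRIBE_EVENT, &sub);
}
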
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 979e544388cb..95a3f5e82aef 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -131,6 +131,7 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
131 continue; 131 continue;
132 132
133 for (plane = 0; plane < vb->num_planes; ++plane) { 133 for (plane = 0; plane < vb->num_planes; ++plane) {
134 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
134 vb->v4l2_planes[plane].m.mem_offset = off; 135 vb->v4l2_planes[plane].m.mem_offset = off;
135 136
136 dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", 137 dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
@@ -264,6 +265,7 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
264 q->num_buffers -= buffers; 265 q->num_buffers -= buffers;
265 if (!q->num_buffers) 266 if (!q->num_buffers)
266 q->memory = 0; 267 q->memory = 0;
268 INIT_LIST_HEAD(&q->queued_list);
267} 269}
268 270
269/** 271/**
@@ -296,14 +298,14 @@ static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
296{ 298{
297 unsigned int plane; 299 unsigned int plane;
298 for (plane = 0; plane < vb->num_planes; ++plane) { 300 for (plane = 0; plane < vb->num_planes; ++plane) {
301 void *mem_priv = vb->planes[plane].mem_priv;
299 /* 302 /*
300 * If num_users() has not been provided, call_memop 303 * If num_users() has not been provided, call_memop
301 * will return 0, apparently nobody cares about this 304 * will return 0, apparently nobody cares about this
302 * case anyway. If num_users() returns more than 1, 305 * case anyway. If num_users() returns more than 1,
303 * we are not the only user of the plane's memory. 306 * we are not the only user of the plane's memory.
304 */ 307 */
305 if (call_memop(q, plane, num_users, 308 if (mem_priv && call_memop(q, plane, num_users, mem_priv) > 1)
306 vb->planes[plane].mem_priv) > 1)
307 return true; 309 return true;
308 } 310 }
309 return false; 311 return false;
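
With __setup_offsets now also recording each plane's allocated length, user space sees a valid length for MMAP buffers as soon as buffers have been requested, not only after they have been queued. A small sketch of reading it back on a multi-planar queue (assumes fd is an open multi-planar capture node on which VIDIOC_REQBUFS has already succeeded):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void show_plane0(int fd)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	memset(planes, 0, sizeof(planes));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = 0;
	buf.m.planes = planes;
	buf.length = VIDEO_MAX_PLANES;	/* size of the planes array */

	if (ioctl(fd, VIDIOC_QUERYBUF, &buf) == 0)
		printf("plane 0: offset 0x%x, length %u\n",
		       planes[0].m.mem_offset, planes[0].length);
}
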
diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c
index 4175544b491b..ec10629a0b0b 100644
--- a/drivers/mfd/ab5500-core.c
+++ b/drivers/mfd/ab5500-core.c
@@ -13,6 +13,7 @@
13 * TODO: Event handling with irq_chip. Waiting for PRCMU fw support. 13 * TODO: Event handling with irq_chip. Waiting for PRCMU fw support.
14 */ 14 */
15 15
16#include <linux/module.h>
16#include <linux/mutex.h> 17#include <linux/mutex.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include <linux/platform_device.h> 19#include <linux/platform_device.h>
diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c
index 6be1fe6b5f9a..43c0ebb81956 100644
--- a/drivers/mfd/ab5500-debugfs.c
+++ b/drivers/mfd/ab5500-debugfs.c
@@ -4,6 +4,7 @@
4 * Debugfs support for the AB5500 MFD driver 4 * Debugfs support for the AB5500 MFD driver
5 */ 5 */
6 6
7#include <linux/export.h>
7#include <linux/debugfs.h> 8#include <linux/debugfs.h>
8#include <linux/seq_file.h> 9#include <linux/seq_file.h>
9#include <linux/mfd/ab5500/ab5500.h> 10#include <linux/mfd/ab5500/ab5500.h>
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index d593878d66d0..5664696f2d3a 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -472,7 +472,7 @@ config BMP085
472 module will be called bmp085. 472 module will be called bmp085.
473 473
474config PCH_PHUB 474config PCH_PHUB
475 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB" 475 tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
476 depends on PCI 476 depends on PCI
477 help 477 help
478 This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of 478 This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
@@ -480,12 +480,13 @@ config PCH_PHUB
480 processor. The Topcliff has MAC address and Option ROM data in SROM. 480 processor. The Topcliff has MAC address and Option ROM data in SROM.
481 This driver can access MAC address and Option ROM data in SROM. 481 This driver can access MAC address and Option ROM data in SROM.
482 482
483 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 483 This driver also can be used for LAPIS Semiconductor's IOH,
484 Output Hub), ML7213 and ML7223. 484 ML7213/ML7223/ML7831.
485 ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is 485 ML7213 which is for IVI(In-Vehicle Infotainment) use.
486 for MP(Media Phone) use. 486 ML7223 IOH is for MP(Media Phone) use.
487 ML7213/ML7223 is companion chip for Intel Atom E6xx series. 487 ML7831 IOH is for general purpose use.
488 ML7213/ML7223 is completely compatible for Intel EG20T PCH. 488 ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
489 ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
489 490
490 To compile this driver as a module, choose M here: the module will 491 To compile this driver as a module, choose M here: the module will
491 be called pch_phub. 492 be called pch_phub.
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index a662f5987b68..82b2cb77ae19 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -100,7 +100,7 @@ enum dpot_devid {
100 AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27), 100 AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
101 AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, 101 AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
102 BRDAC0, 7, 28), 102 BRDAC0, 7, 28),
103 AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, 103 AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
104 BRDAC0, 8, 29), 104 BRDAC0, 8, 29),
105 AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, 105 AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
106 BRDAC0 | BRDAC1, 8, 30), 106 BRDAC0 | BRDAC1, 8, 30),
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index 7ce6065dc20e..eb5cd28bc6d8 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -945,8 +945,7 @@ static int fpga_of_remove(struct platform_device *op)
945/* CTL-CPLD Version Register */ 945/* CTL-CPLD Version Register */
946#define CTL_CPLD_VERSION 0x2000 946#define CTL_CPLD_VERSION 0x2000
947 947
948static int fpga_of_probe(struct platform_device *op, 948static int fpga_of_probe(struct platform_device *op)
949 const struct of_device_id *match)
950{ 949{
951 struct device_node *of_node = op->dev.of_node; 950 struct device_node *of_node = op->dev.of_node;
952 struct device *this_device; 951 struct device *this_device;
@@ -1107,7 +1106,7 @@ static struct of_device_id fpga_of_match[] = {
1107 {}, 1106 {},
1108}; 1107};
1109 1108
1110static struct of_platform_driver fpga_of_driver = { 1109static struct platform_driver fpga_of_driver = {
1111 .probe = fpga_of_probe, 1110 .probe = fpga_of_probe,
1112 .remove = fpga_of_remove, 1111 .remove = fpga_of_remove,
1113 .driver = { 1112 .driver = {
@@ -1124,12 +1123,12 @@ static struct of_platform_driver fpga_of_driver = {
1124static int __init fpga_init(void) 1123static int __init fpga_init(void)
1125{ 1124{
1126 led_trigger_register_simple("fpga", &ledtrig_fpga); 1125 led_trigger_register_simple("fpga", &ledtrig_fpga);
1127 return of_register_platform_driver(&fpga_of_driver); 1126 return platform_driver_register(&fpga_of_driver);
1128} 1127}
1129 1128
1130static void __exit fpga_exit(void) 1129static void __exit fpga_exit(void)
1131{ 1130{
1132 of_unregister_platform_driver(&fpga_of_driver); 1131 platform_driver_unregister(&fpga_of_driver);
1133 led_trigger_unregister_simple(ledtrig_fpga); 1132 led_trigger_unregister_simple(ledtrig_fpga);
1134} 1133}
1135 1134
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 3965821fef17..14e974b2a781 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -1249,8 +1249,7 @@ static bool dma_filter(struct dma_chan *chan, void *data)
1249 return true; 1249 return true;
1250} 1250}
1251 1251
1252static int data_of_probe(struct platform_device *op, 1252static int data_of_probe(struct platform_device *op)
1253 const struct of_device_id *match)
1254{ 1253{
1255 struct device_node *of_node = op->dev.of_node; 1254 struct device_node *of_node = op->dev.of_node;
1256 struct device *this_device; 1255 struct device *this_device;
@@ -1401,7 +1400,7 @@ static struct of_device_id data_of_match[] = {
1401 {}, 1400 {},
1402}; 1401};
1403 1402
1404static struct of_platform_driver data_of_driver = { 1403static struct platform_driver data_of_driver = {
1405 .probe = data_of_probe, 1404 .probe = data_of_probe,
1406 .remove = data_of_remove, 1405 .remove = data_of_remove,
1407 .driver = { 1406 .driver = {
@@ -1417,12 +1416,12 @@ static struct of_platform_driver data_of_driver = {
1417 1416
1418static int __init data_init(void) 1417static int __init data_init(void)
1419{ 1418{
1420 return of_register_platform_driver(&data_of_driver); 1419 return platform_driver_register(&data_of_driver);
1421} 1420}
1422 1421
1423static void __exit data_exit(void) 1422static void __exit data_exit(void)
1424{ 1423{
1425 of_unregister_platform_driver(&data_of_driver); 1424 platform_driver_unregister(&data_of_driver);
1426} 1425}
1427 1426
1428MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); 1427MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 26cf12ca7f50..701edf658970 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -85,7 +85,7 @@ config EEPROM_93XX46
85 85
86config EEPROM_DIGSY_MTC_CFG 86config EEPROM_DIGSY_MTC_CFG
87 bool "DigsyMTC display configuration EEPROMs device" 87 bool "DigsyMTC display configuration EEPROMs device"
88 depends on PPC_MPC5200_GPIO && GPIOLIB && SPI_GPIO 88 depends on GPIO_MPC5200 && SPI_GPIO
89 help 89 help
90 This option enables access to display configuration EEPROMs 90 This option enables access to display configuration EEPROMs
91 on digsy_mtc board. You have to additionally select Microwire 91 on digsy_mtc board. You have to additionally select Microwire
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index dee33addcaeb..10fc4785dba7 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. 2 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
@@ -41,10 +41,10 @@
41#define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset 41#define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset
42 (Intel EG20T PCH)*/ 42 (Intel EG20T PCH)*/
43#define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address 43#define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address
44 offset(OKI SEMICONDUCTOR ML7213) 44 offset(LAPIS Semicon ML7213)
45 */ 45 */
46#define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address 46#define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address
47 offset(OKI SEMICONDUCTOR ML7223) 47 offset(LAPIS Semicon ML7223)
48 */ 48 */
49 49
50/* MAX number of INT_REDUCE_CONTROL registers */ 50/* MAX number of INT_REDUCE_CONTROL registers */
@@ -73,6 +73,9 @@
73#define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */ 73#define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */
74#define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */ 74#define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */
75 75
76/* Macros for ML7831 */
77#define PCI_DEVICE_ID_ROHM_ML7831_PHUB 0x8801
78
76/* SROM ACCESS Macro */ 79/* SROM ACCESS Macro */
77#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1)) 80#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1))
78 81
@@ -115,6 +118,7 @@
115 * @pch_mac_start_address: MAC address area start address 118 * @pch_mac_start_address: MAC address area start address
116 * @pch_opt_rom_start_address: Option ROM start address 119 * @pch_opt_rom_start_address: Option ROM start address
117 * @ioh_type: Save IOH type 120 * @ioh_type: Save IOH type
121 * @pdev: pointer to pci device struct
118 */ 122 */
119struct pch_phub_reg { 123struct pch_phub_reg {
120 u32 phub_id_reg; 124 u32 phub_id_reg;
@@ -136,6 +140,7 @@ struct pch_phub_reg {
136 u32 pch_mac_start_address; 140 u32 pch_mac_start_address;
137 u32 pch_opt_rom_start_address; 141 u32 pch_opt_rom_start_address;
138 int ioh_type; 142 int ioh_type;
143 struct pci_dev *pdev;
139}; 144};
140 145
141/* SROM SPEC for MAC address assignment offset */ 146/* SROM SPEC for MAC address assignment offset */
@@ -471,7 +476,7 @@ static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
471 int retval; 476 int retval;
472 int i; 477 int i;
473 478
474 if (chip->ioh_type == 1) /* EG20T */ 479 if ((chip->ioh_type == 1) || (chip->ioh_type == 5)) /* EG20T or ML7831*/
475 retval = pch_phub_gbe_serial_rom_conf(chip); 480 retval = pch_phub_gbe_serial_rom_conf(chip);
476 else /* ML7223 */ 481 else /* ML7223 */
477 retval = pch_phub_gbe_serial_rom_conf_mp(chip); 482 retval = pch_phub_gbe_serial_rom_conf_mp(chip);
@@ -498,6 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
498 unsigned int orom_size; 503 unsigned int orom_size;
499 int ret; 504 int ret;
500 int err; 505 int err;
506 ssize_t rom_size;
501 507
502 struct pch_phub_reg *chip = 508 struct pch_phub_reg *chip =
503 dev_get_drvdata(container_of(kobj, struct device, kobj)); 509 dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -509,6 +515,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
509 } 515 }
510 516
511 /* Get Rom signature */ 517 /* Get Rom signature */
518 chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
519 if (!chip->pch_phub_extrom_base_address)
520 goto exrom_map_err;
521
512 pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address, 522 pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
513 (unsigned char *)&rom_signature); 523 (unsigned char *)&rom_signature);
514 rom_signature &= 0xff; 524 rom_signature &= 0xff;
@@ -539,10 +549,13 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
539 goto return_err; 549 goto return_err;
540 } 550 }
541return_ok: 551return_ok:
552 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
542 mutex_unlock(&pch_phub_mutex); 553 mutex_unlock(&pch_phub_mutex);
543 return addr_offset; 554 return addr_offset;
544 555
545return_err: 556return_err:
557 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
558exrom_map_err:
546 mutex_unlock(&pch_phub_mutex); 559 mutex_unlock(&pch_phub_mutex);
547return_err_nomutex: 560return_err_nomutex:
548 return err; 561 return err;
@@ -555,6 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
555 int err; 568 int err;
556 unsigned int addr_offset; 569 unsigned int addr_offset;
557 int ret; 570 int ret;
571 ssize_t rom_size;
558 struct pch_phub_reg *chip = 572 struct pch_phub_reg *chip =
559 dev_get_drvdata(container_of(kobj, struct device, kobj)); 573 dev_get_drvdata(container_of(kobj, struct device, kobj));
560 574
@@ -571,6 +585,12 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
571 goto return_ok; 585 goto return_ok;
572 } 586 }
573 587
588 chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
589 if (!chip->pch_phub_extrom_base_address) {
590 err = -ENOMEM;
591 goto exrom_map_err;
592 }
593
574 for (addr_offset = 0; addr_offset < count; addr_offset++) { 594 for (addr_offset = 0; addr_offset < count; addr_offset++) {
575 if (PCH_PHUB_OROM_SIZE < off + addr_offset) 595 if (PCH_PHUB_OROM_SIZE < off + addr_offset)
576 goto return_ok; 596 goto return_ok;
@@ -585,10 +605,14 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
585 } 605 }
586 606
587return_ok: 607return_ok:
608 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
588 mutex_unlock(&pch_phub_mutex); 609 mutex_unlock(&pch_phub_mutex);
589 return addr_offset; 610 return addr_offset;
590 611
591return_err: 612return_err:
613 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
614
615exrom_map_err:
592 mutex_unlock(&pch_phub_mutex); 616 mutex_unlock(&pch_phub_mutex);
593 return err; 617 return err;
594} 618}
@@ -598,8 +622,14 @@ static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr,
598{ 622{
599 u8 mac[8]; 623 u8 mac[8];
600 struct pch_phub_reg *chip = dev_get_drvdata(dev); 624 struct pch_phub_reg *chip = dev_get_drvdata(dev);
625 ssize_t rom_size;
626
627 chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
628 if (!chip->pch_phub_extrom_base_address)
629 return -ENOMEM;
601 630
602 pch_phub_read_gbe_mac_addr(chip, mac); 631 pch_phub_read_gbe_mac_addr(chip, mac);
632 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
603 633
604 return sprintf(buf, "%pM\n", mac); 634 return sprintf(buf, "%pM\n", mac);
605} 635}
@@ -608,6 +638,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
608 const char *buf, size_t count) 638 const char *buf, size_t count)
609{ 639{
610 u8 mac[6]; 640 u8 mac[6];
641 ssize_t rom_size;
611 struct pch_phub_reg *chip = dev_get_drvdata(dev); 642 struct pch_phub_reg *chip = dev_get_drvdata(dev);
612 643
613 if (count != 18) 644 if (count != 18)
@@ -617,7 +648,12 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
617 (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3], 648 (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3],
618 (u32 *)&mac[4], (u32 *)&mac[5]); 649 (u32 *)&mac[4], (u32 *)&mac[5]);
619 650
651 chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
652 if (!chip->pch_phub_extrom_base_address)
653 return -ENOMEM;
654
620 pch_phub_write_gbe_mac_addr(chip, mac); 655 pch_phub_write_gbe_mac_addr(chip, mac);
656 pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
621 657
622 return count; 658 return count;
623} 659}
@@ -640,7 +676,6 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
640 int retval; 676 int retval;
641 677
642 int ret; 678 int ret;
643 ssize_t rom_size;
644 struct pch_phub_reg *chip; 679 struct pch_phub_reg *chip;
645 680
646 chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL); 681 chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL);
@@ -677,19 +712,7 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
677 "in pch_phub_base_address variable is %p\n", __func__, 712 "in pch_phub_base_address variable is %p\n", __func__,
678 chip->pch_phub_base_address); 713 chip->pch_phub_base_address);
679 714
680 if (id->driver_data != 3) { 715 chip->pdev = pdev; /* Save pci device struct */
681 chip->pch_phub_extrom_base_address =\
682 pci_map_rom(pdev, &rom_size);
683 if (chip->pch_phub_extrom_base_address == 0) {
684 dev_err(&pdev->dev, "%s: pci_map_rom FAILED", __func__);
685 ret = -ENOMEM;
686 goto err_pci_map;
687 }
688 dev_dbg(&pdev->dev, "%s : "
689 "pci_map_rom SUCCESS and value in "
690 "pch_phub_extrom_base_address variable is %p\n",
691 __func__, chip->pch_phub_extrom_base_address);
692 }
693 716
694 if (id->driver_data == 1) { /* EG20T PCH */ 717 if (id->driver_data == 1) { /* EG20T PCH */
695 const char *board_name; 718 const char *board_name;
@@ -763,6 +786,22 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
763 chip->pch_opt_rom_start_address =\ 786 chip->pch_opt_rom_start_address =\
764 PCH_PHUB_ROM_START_ADDR_ML7223; 787 PCH_PHUB_ROM_START_ADDR_ML7223;
765 chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223; 788 chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
789 } else if (id->driver_data == 5) { /* ML7831 */
790 retval = sysfs_create_file(&pdev->dev.kobj,
791 &dev_attr_pch_mac.attr);
792 if (retval)
793 goto err_sysfs_create;
794
795 retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
796 if (retval)
797 goto exit_bin_attr;
798
799 /* set the prefech value */
800 iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
801 /* set the interrupt delay value */
802 iowrite32(0x25, chip->pch_phub_base_address + 0x44);
803 chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
804 chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
766 } 805 }
767 806
768 chip->ioh_type = id->driver_data; 807 chip->ioh_type = id->driver_data;
@@ -773,8 +812,6 @@ exit_bin_attr:
773 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); 812 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
774 813
775err_sysfs_create: 814err_sysfs_create:
776 pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
777err_pci_map:
778 pci_iounmap(pdev, chip->pch_phub_base_address); 815 pci_iounmap(pdev, chip->pch_phub_base_address);
779err_pci_iomap: 816err_pci_iomap:
780 pci_release_regions(pdev); 817 pci_release_regions(pdev);
@@ -792,7 +829,6 @@ static void __devexit pch_phub_remove(struct pci_dev *pdev)
792 829
793 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); 830 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
794 sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr); 831 sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr);
795 pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
796 pci_iounmap(pdev, chip->pch_phub_base_address); 832 pci_iounmap(pdev, chip->pch_phub_base_address);
797 pci_release_regions(pdev); 833 pci_release_regions(pdev);
798 pci_disable_device(pdev); 834 pci_disable_device(pdev);
@@ -847,6 +883,7 @@ static struct pci_device_id pch_phub_pcidev_id[] = {
847 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, }, 883 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, },
848 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, }, 884 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, },
849 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, }, 885 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, },
886 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7831_PHUB), 5, },
850 { } 887 { }
851}; 888};
852MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id); 889MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
@@ -873,5 +910,5 @@ static void __exit pch_phub_pci_exit(void)
873module_init(pch_phub_pci_init); 910module_init(pch_phub_pci_init);
874module_exit(pch_phub_pci_exit); 911module_exit(pch_phub_pci_exit);
875 912
876MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB"); 913MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7223) PHUB");
877MODULE_LICENSE("GPL"); 914MODULE_LICENSE("GPL");
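
Instead of keeping the option ROM mapped for the whole lifetime of the device, pch_phub now saves the pci_dev pointer at probe time and maps the ROM around each SROM access. In sketch form the new pattern is (read_one_rom_byte() is a hypothetical wrapper for illustration; pch_phub_read_serial_rom() is the driver's existing helper):

/* Sketch of the map-around-access pattern introduced above. */
static int read_one_rom_byte(struct pch_phub_reg *chip, unsigned int offset, u8 *val)
{
	size_t rom_size;

	chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
	if (!chip->pch_phub_extrom_base_address)
		return -ENOMEM;		/* ROM not exposed, nothing to read */

	pch_phub_read_serial_rom(chip, offset, val);

	pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
	return 0;
}

The same map/unmap bracket now wraps the sysfs bin_read/bin_write and MAC accessors above, which is why the probe-time pci_map_rom() and the matching unmap in remove() disappear.
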
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
index cfbddbef11de..43d073bc1d9c 100644
--- a/drivers/misc/spear13xx_pcie_gadget.c
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -903,6 +903,6 @@ static void __exit spear_pcie_gadget_exit(void)
903} 903}
904module_exit(spear_pcie_gadget_exit); 904module_exit(spear_pcie_gadget_exit);
905 905
906MODULE_ALIAS("pcie-gadget-spear"); 906MODULE_ALIAS("platform:pcie-gadget-spear");
907MODULE_AUTHOR("Pratyush Anand"); 907MODULE_AUTHOR("Pratyush Anand");
908MODULE_LICENSE("GPL"); 908MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a1cb21f95302..1e0e27cbe987 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1606,6 +1606,14 @@ static const struct mmc_fixup blk_fixups[] =
1606 MMC_QUIRK_BLK_NO_CMD23), 1606 MMC_QUIRK_BLK_NO_CMD23),
1607 MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc, 1607 MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1608 MMC_QUIRK_BLK_NO_CMD23), 1608 MMC_QUIRK_BLK_NO_CMD23),
1609
1610 /*
1611 * Some Micron MMC cards needs longer data read timeout than
1612 * indicated in CSD.
1613 */
1614 MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
1615 MMC_QUIRK_LONG_READ_TIME),
1616
1609 END_FIXUP 1617 END_FIXUP
1610}; 1618};
1611 1619
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5278ffb20e74..950b97d7412a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -529,6 +529,18 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
529 data->timeout_clks = 0; 529 data->timeout_clks = 0;
530 } 530 }
531 } 531 }
532
533 /*
534 * Some cards require longer data read timeout than indicated in CSD.
535 * Address this by setting the read timeout to a "reasonably high"
536 * value. For the cards tested, 300ms has proven enough. If necessary,
537 * this value can be increased if other problematic cards require this.
538 */
539 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
540 data->timeout_ns = 300000000;
541 data->timeout_clks = 0;
542 }
543
532 /* 544 /*
533 * Some cards need very high timeouts if driven in SPI mode. 545 * Some cards need very high timeouts if driven in SPI mode.
534 * The worst observed timeout was 900ms after writing a 546 * The worst observed timeout was 900ms after writing a
@@ -1213,6 +1225,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1213 mmc_host_clk_release(host); 1225 mmc_host_clk_release(host);
1214} 1226}
1215 1227
1228static void mmc_poweroff_notify(struct mmc_host *host)
1229{
1230 struct mmc_card *card;
1231 unsigned int timeout;
1232 unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
1233 int err = 0;
1234
1235 card = host->card;
1236
1237 /*
1238 * Send power notify command only if card
1239 * is mmc and notify state is powered ON
1240 */
1241 if (card && mmc_card_mmc(card) &&
1242 (card->poweroff_notify_state == MMC_POWERED_ON)) {
1243
1244 if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
1245 notify_type = EXT_CSD_POWER_OFF_SHORT;
1246 timeout = card->ext_csd.generic_cmd6_time;
1247 card->poweroff_notify_state = MMC_POWEROFF_SHORT;
1248 } else {
1249 notify_type = EXT_CSD_POWER_OFF_LONG;
1250 timeout = card->ext_csd.power_off_longtime;
1251 card->poweroff_notify_state = MMC_POWEROFF_LONG;
1252 }
1253
1254 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1255 EXT_CSD_POWER_OFF_NOTIFICATION,
1256 notify_type, timeout);
1257
1258 if (err && err != -EBADMSG)
1259 pr_err("Device failed to respond within %d poweroff "
1260 "time. Forcefully powering down the device\n",
1261 timeout);
1262
1263 /* Set the card state to no notification after the poweroff */
1264 card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
1265 }
1266}
1267
1216/* 1268/*
1217 * Apply power to the MMC stack. This is a two-stage process. 1269 * Apply power to the MMC stack. This is a two-stage process.
1218 * First, we enable power to the card without the clock running. 1270 * First, we enable power to the card without the clock running.
@@ -1269,42 +1321,12 @@ static void mmc_power_up(struct mmc_host *host)
1269 1321
1270void mmc_power_off(struct mmc_host *host) 1322void mmc_power_off(struct mmc_host *host)
1271{ 1323{
1272 struct mmc_card *card;
1273 unsigned int notify_type;
1274 unsigned int timeout;
1275 int err;
1276
1277 mmc_host_clk_hold(host); 1324 mmc_host_clk_hold(host);
1278 1325
1279 card = host->card;
1280 host->ios.clock = 0; 1326 host->ios.clock = 0;
1281 host->ios.vdd = 0; 1327 host->ios.vdd = 0;
1282 1328
1283 if (card && mmc_card_mmc(card) && 1329 mmc_poweroff_notify(host);
1284 (card->poweroff_notify_state == MMC_POWERED_ON)) {
1285
1286 if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
1287 notify_type = EXT_CSD_POWER_OFF_SHORT;
1288 timeout = card->ext_csd.generic_cmd6_time;
1289 card->poweroff_notify_state = MMC_POWEROFF_SHORT;
1290 } else {
1291 notify_type = EXT_CSD_POWER_OFF_LONG;
1292 timeout = card->ext_csd.power_off_longtime;
1293 card->poweroff_notify_state = MMC_POWEROFF_LONG;
1294 }
1295
1296 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1297 EXT_CSD_POWER_OFF_NOTIFICATION,
1298 notify_type, timeout);
1299
1300 if (err && err != -EBADMSG)
1301 pr_err("Device failed to respond within %d poweroff "
1302 "time. Forcefully powering down the device\n",
1303 timeout);
1304
1305 /* Set the card state to no notification after the poweroff */
1306 card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
1307 }
1308 1330
1309 /* 1331 /*
1310 * Reset ocr mask to be the highest possible voltage supported for 1332 * Reset ocr mask to be the highest possible voltage supported for
@@ -2196,7 +2218,7 @@ int mmc_card_sleep(struct mmc_host *host)
2196 2218
2197 mmc_bus_get(host); 2219 mmc_bus_get(host);
2198 2220
2199 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) 2221 if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
2200 err = host->bus_ops->sleep(host); 2222 err = host->bus_ops->sleep(host);
2201 2223
2202 mmc_bus_put(host); 2224 mmc_bus_put(host);
@@ -2302,8 +2324,17 @@ int mmc_suspend_host(struct mmc_host *host)
2302 * pre-claim the host. 2324 * pre-claim the host.
2303 */ 2325 */
2304 if (mmc_try_claim_host(host)) { 2326 if (mmc_try_claim_host(host)) {
2305 if (host->bus_ops->suspend) 2327 if (host->bus_ops->suspend) {
2328 /*
2329 * For eMMC 4.5 device send notify command
2330 * before sleep, because in sleep state eMMC 4.5
2331 * devices respond to only RESET and AWAKE cmd
2332 */
2333 mmc_poweroff_notify(host);
2306 err = host->bus_ops->suspend(host); 2334 err = host->bus_ops->suspend(host);
2335 }
2336 mmc_do_release_host(host);
2337
2307 if (err == -ENOSYS || !host->bus_ops->resume) { 2338 if (err == -ENOSYS || !host->bus_ops->resume) {
2308 /* 2339 /*
2309 * We simply "remove" the card in this case. 2340 * We simply "remove" the card in this case.
@@ -2318,7 +2349,6 @@ int mmc_suspend_host(struct mmc_host *host)
2318 host->pm_flags = 0; 2349 host->pm_flags = 0;
2319 err = 0; 2350 err = 0;
2320 } 2351 }
2321 mmc_do_release_host(host);
2322 } else { 2352 } else {
2323 err = -EBUSY; 2353 err = -EBUSY;
2324 } 2354 }
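
The core.c change folds the power-off notification into a helper, mmc_poweroff_notify(), and calls it both from mmc_power_off() and, for eMMC 4.5 parts, just before the bus suspend handler; it also releases the host before the resume-capability check, fixing the claim/release pairing. Whether the notification is sent at all remains the host driver's choice. The opt-in looks roughly like this (a sketch using the field and flag names seen in the hunks above; the _LONG variant is assumed to exist alongside _SHORT):

	/* In the host controller probe, before mmc_add_host(): */
	mmc->caps2 |= MMC_CAP2_POWEROFF_NOTIFY;
	mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;	/* or MMC_HOST_PW_NOTIFY_LONG */
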
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index dbf421a6279c..d240427c1246 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -876,17 +876,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
876 * set the notification byte in the ext_csd register of device 876 * set the notification byte in the ext_csd register of device
877 */ 877 */
878 if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) && 878 if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
879 (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) { 879 (card->ext_csd.rev >= 6)) {
880 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 880 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
881 EXT_CSD_POWER_OFF_NOTIFICATION, 881 EXT_CSD_POWER_OFF_NOTIFICATION,
882 EXT_CSD_POWER_ON, 882 EXT_CSD_POWER_ON,
883 card->ext_csd.generic_cmd6_time); 883 card->ext_csd.generic_cmd6_time);
884 if (err && err != -EBADMSG) 884 if (err && err != -EBADMSG)
885 goto free_card; 885 goto free_card;
886 }
887 886
888 if (!err) 887 /*
889 card->poweroff_notify_state = MMC_POWERED_ON; 888 * The err can be -EBADMSG or 0,
889 * so check for success and update the flag
890 */
891 if (!err)
892 card->poweroff_notify_state = MMC_POWERED_ON;
893 }
890 894
891 /* 895 /*
892 * Activate high speed (if supported) 896 * Activate high speed (if supported)
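
The mmc.c hunk replaces the notify-state check with card->ext_csd.rev >= 6: POWER_OFF_NOTIFICATION only exists from that EXT_CSD revision onwards, so older devices are skipped entirely. For reference (a brief recap, not part of the patch itself):

	/* EXT_CSD_REV -> eMMC spec generation (relevant subset):
	 *   5 = eMMC 4.41
	 *   6 = eMMC 4.5  (first revision with POWER_OFF_NOTIFICATION)
	 */
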
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 325ea61e12d3..8e0fbe994047 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -732,6 +732,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
732 "failed to config DMA channel. Falling back to PIO\n"); 732 "failed to config DMA channel. Falling back to PIO\n");
733 dma_release_channel(host->dma); 733 dma_release_channel(host->dma);
734 host->do_dma = 0; 734 host->do_dma = 0;
735 host->dma = NULL;
735 } 736 }
736 } 737 }
737 738
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 101cd31c8220..d5fe43d53c51 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1010,6 +1010,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
1010 host->data->sg_len, 1010 host->data->sg_len,
1011 omap_hsmmc_get_dma_dir(host, host->data)); 1011 omap_hsmmc_get_dma_dir(host, host->data));
1012 omap_free_dma(dma_ch); 1012 omap_free_dma(dma_ch);
1013 host->data->host_cookie = 0;
1013 } 1014 }
1014 host->data = NULL; 1015 host->data = NULL;
1015} 1016}
@@ -1575,8 +1576,10 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
1575 struct mmc_data *data = mrq->data; 1576 struct mmc_data *data = mrq->data;
1576 1577
1577 if (host->use_dma) { 1578 if (host->use_dma) {
1578 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 1579 if (data->host_cookie)
1579 omap_hsmmc_get_dma_dir(host, data)); 1580 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
1581 data->sg_len,
1582 omap_hsmmc_get_dma_dir(host, data));
1580 data->host_cookie = 0; 1583 data->host_cookie = 0;
1581 } 1584 }
1582} 1585}
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 4b920b7621cf..87b6f079b6e0 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,6 +15,7 @@
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/mmc/host.h> 17#include <linux/mmc/host.h>
18#include <linux/module.h>
18#include <mach/cns3xxx.h> 19#include <mach/cns3xxx.h>
19#include "sdhci-pltfm.h" 20#include "sdhci-pltfm.h"
20 21
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index ae57769ba50d..4b976f00ea85 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -32,6 +32,7 @@
32/* VENDOR SPEC register */ 32/* VENDOR SPEC register */
33#define SDHCI_VENDOR_SPEC 0xC0 33#define SDHCI_VENDOR_SPEC 0xC0
34#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 34#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
35#define SDHCI_WTMK_LVL 0x44
35#define SDHCI_MIX_CTRL 0x48 36#define SDHCI_MIX_CTRL 0x48
36 37
37/* 38/*
@@ -476,6 +477,13 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
476 if (is_imx53_esdhc(imx_data)) 477 if (is_imx53_esdhc(imx_data))
477 imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; 478 imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
478 479
480 /*
481 * The imx6q ROM code will change the default watermark level setting
482 * to something insane. Change it back here.
483 */
484 if (is_imx6q_usdhc(imx_data))
485 writel(0x08100810, host->ioaddr + SDHCI_WTMK_LVL);
486
479 boarddata = &imx_data->boarddata; 487 boarddata = &imx_data->boarddata;
480 if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { 488 if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
481 if (!host->mmc->parent->platform_data) { 489 if (!host->mmc->parent->platform_data) {
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 3d00e722efc9..cb60c4197e0a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -644,8 +644,6 @@ static int sdhci_s3c_resume(struct platform_device *dev)
644static struct platform_driver sdhci_s3c_driver = { 644static struct platform_driver sdhci_s3c_driver = {
645 .probe = sdhci_s3c_probe, 645 .probe = sdhci_s3c_probe,
646 .remove = __devexit_p(sdhci_s3c_remove), 646 .remove = __devexit_p(sdhci_s3c_remove),
647 .suspend = sdhci_s3c_suspend,
648 .resume = sdhci_s3c_resume,
649 .driver = { 647 .driver = {
650 .owner = THIS_MODULE, 648 .owner = THIS_MODULE,
651 .name = "s3c-sdhci", 649 .name = "s3c-sdhci",
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 369366c8e205..d5505f3fe2a1 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -908,7 +908,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
908 if (host->power) { 908 if (host->power) {
909 pm_runtime_put(&host->pd->dev); 909 pm_runtime_put(&host->pd->dev);
910 host->power = false; 910 host->power = false;
911 if (p->down_pwr) 911 if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
912 p->down_pwr(host->pd); 912 p->down_pwr(host->pd);
913 } 913 }
914 host->state = STATE_IDLE; 914 host->state = STATE_IDLE;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index d85a60cda167..4208b3958069 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -798,7 +798,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
798 /* start bus clock */ 798 /* start bus clock */
799 tmio_mmc_clk_start(host); 799 tmio_mmc_clk_start(host);
800 } else if (ios->power_mode != MMC_POWER_UP) { 800 } else if (ios->power_mode != MMC_POWER_UP) {
801 if (host->set_pwr) 801 if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
802 host->set_pwr(host->pdev, 0); 802 host->set_pwr(host->pdev, 0);
803 if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && 803 if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
804 pdata->power) { 804 pdata->power) {
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
index 608967fe74c6..736ca10ca9f1 100644
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ b/drivers/mtd/maps/bcm963xx-flash.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/module.h>
24#include <linux/mtd/map.h> 25#include <linux/mtd/map.h>
25#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
26#include <linux/mtd/partitions.h> 27#include <linux/mtd/partitions.h>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 583f66cd5bbd..654a5e94e0e7 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -245,6 +245,8 @@ source "drivers/net/ethernet/Kconfig"
245 245
246source "drivers/net/fddi/Kconfig" 246source "drivers/net/fddi/Kconfig"
247 247
248source "drivers/net/hippi/Kconfig"
249
248config NET_SB1000 250config NET_SB1000
249 tristate "General Instruments Surfboard 1000" 251 tristate "General Instruments Surfboard 1000"
250 depends on PNP 252 depends on PNP
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index a73d9dc80ff6..84fb6349a59a 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -4,7 +4,7 @@
4 4
5menuconfig ARCNET 5menuconfig ARCNET
6 depends on NETDEVICES && (ISA || PCI || PCMCIA) 6 depends on NETDEVICES && (ISA || PCI || PCMCIA)
7 bool "ARCnet support" 7 tristate "ARCnet support"
8 ---help--- 8 ---help---
9 If you have a network card of this type, say Y and check out the 9 If you have a network card of this type, say Y and check out the
10 (arguably) beautiful poetry in 10 (arguably) beautiful poetry in
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b0c577256487..7f8756825b8a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2553,30 +2553,6 @@ re_arm:
2553 } 2553 }
2554} 2554}
2555 2555
2556static __be32 bond_glean_dev_ip(struct net_device *dev)
2557{
2558 struct in_device *idev;
2559 struct in_ifaddr *ifa;
2560 __be32 addr = 0;
2561
2562 if (!dev)
2563 return 0;
2564
2565 rcu_read_lock();
2566 idev = __in_dev_get_rcu(dev);
2567 if (!idev)
2568 goto out;
2569
2570 ifa = idev->ifa_list;
2571 if (!ifa)
2572 goto out;
2573
2574 addr = ifa->ifa_local;
2575out:
2576 rcu_read_unlock();
2577 return addr;
2578}
2579
2580static int bond_has_this_ip(struct bonding *bond, __be32 ip) 2556static int bond_has_this_ip(struct bonding *bond, __be32 ip)
2581{ 2557{
2582 struct vlan_entry *vlan; 2558 struct vlan_entry *vlan;
@@ -3322,6 +3298,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3322 struct bonding *bond; 3298 struct bonding *bond;
3323 struct vlan_entry *vlan; 3299 struct vlan_entry *vlan;
3324 3300
3301 /* we only care about primary address */
3302 if(ifa->ifa_flags & IFA_F_SECONDARY)
3303 return NOTIFY_DONE;
3304
3325 list_for_each_entry(bond, &bn->dev_list, bond_list) { 3305 list_for_each_entry(bond, &bn->dev_list, bond_list) {
3326 if (bond->dev == event_dev) { 3306 if (bond->dev == event_dev) {
3327 switch (event) { 3307 switch (event) {
@@ -3329,7 +3309,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3329 bond->master_ip = ifa->ifa_local; 3309 bond->master_ip = ifa->ifa_local;
3330 return NOTIFY_OK; 3310 return NOTIFY_OK;
3331 case NETDEV_DOWN: 3311 case NETDEV_DOWN:
3332 bond->master_ip = bond_glean_dev_ip(bond->dev); 3312 bond->master_ip = 0;
3333 return NOTIFY_OK; 3313 return NOTIFY_OK;
3334 default: 3314 default:
3335 return NOTIFY_DONE; 3315 return NOTIFY_DONE;
@@ -3345,8 +3325,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3345 vlan->vlan_ip = ifa->ifa_local; 3325 vlan->vlan_ip = ifa->ifa_local;
3346 return NOTIFY_OK; 3326 return NOTIFY_OK;
3347 case NETDEV_DOWN: 3327 case NETDEV_DOWN:
3348 vlan->vlan_ip = 3328 vlan->vlan_ip = 0;
3349 bond_glean_dev_ip(vlan_dev);
3350 return NOTIFY_OK; 3329 return NOTIFY_OK;
3351 default: 3330 default:
3352 return NOTIFY_DONE; 3331 return NOTIFY_DONE;
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5a20804fdece..4ef7e2fd9fe6 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -319,6 +319,13 @@ static ssize_t bonding_store_mode(struct device *d,
319 goto out; 319 goto out;
320 } 320 }
321 321
322 if (bond->slave_cnt > 0) {
323 pr_err("unable to update mode of %s because it has slaves.\n",
324 bond->dev->name);
325 ret = -EPERM;
326 goto out;
327 }
328
322 new_value = bond_parse_parm(buf, bond_mode_tbl); 329 new_value = bond_parse_parm(buf, bond_mode_tbl);
323 if (new_value < 0) { 330 if (new_value < 0) {
324 pr_err("%s: Ignoring invalid mode value %.*s.\n", 331 pr_err("%s: Ignoring invalid mode value %.*s.\n",
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 905bce0b3a43..2c7f5036f570 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -20,7 +20,6 @@
20 */ 20 */
21 21
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/version.h>
24#include <linux/module.h> 23#include <linux/module.h>
25#include <linux/interrupt.h> 24#include <linux/interrupt.h>
26#include <linux/netdevice.h> 25#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 4cf835dbc122..3fb66d09ece5 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp)
608 skb->len, 608 skb->len,
609 DMA_TO_DEVICE); 609 DMA_TO_DEVICE);
610 rp->skb = NULL; 610 rp->skb = NULL;
611 dev_kfree_skb(skb); 611 dev_kfree_skb_irq(skb);
612 } 612 }
613 613
614 bp->tx_cons = cons; 614 bp->tx_cons = cons;
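
The b44 fix matters because b44_tx() runs with the device lock held and interrupts off in the driver's completion path, where plain dev_kfree_skb() is not safe; dev_kfree_skb_irq() defers the actual free instead. As a rule of thumb (a recap of the skb-free helpers, not part of the patch):

	/* Freeing an skb, by calling context:
	 *   process context, IRQs enabled:   dev_kfree_skb(skb);
	 *   hard IRQ or IRQs disabled:       dev_kfree_skb_irq(skb);   <- used here
	 *   unknown / either context:        dev_kfree_skb_any(skb);
	 */
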
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index bce203fa4b9e..882f48f0a03c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -10327,6 +10327,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10327 return 0; 10327 return 0;
10328} 10328}
10329 10329
10330
10331static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
10332 struct link_params *params, u8 mode)
10333{
10334 struct bnx2x *bp = params->bp;
10335 u16 temp;
10336
10337 bnx2x_cl22_write(bp, phy,
10338 MDIO_REG_GPHY_SHADOW,
10339 MDIO_REG_GPHY_SHADOW_LED_SEL1);
10340 bnx2x_cl22_read(bp, phy,
10341 MDIO_REG_GPHY_SHADOW,
10342 &temp);
10343 temp &= 0xff00;
10344
10345 DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
10346 switch (mode) {
10347 case LED_MODE_FRONT_PANEL_OFF:
10348 case LED_MODE_OFF:
10349 temp |= 0x00ee;
10350 break;
10351 case LED_MODE_OPER:
10352 temp |= 0x0001;
10353 break;
10354 case LED_MODE_ON:
10355 temp |= 0x00ff;
10356 break;
10357 default:
10358 break;
10359 }
10360 bnx2x_cl22_write(bp, phy,
10361 MDIO_REG_GPHY_SHADOW,
10362 MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
10363 return;
10364}
10365
10366
10330static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, 10367static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
10331 struct link_params *params) 10368 struct link_params *params)
10332{ 10369{
@@ -11103,7 +11140,7 @@ static struct bnx2x_phy phy_54618se = {
11103 .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback, 11140 .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
11104 .format_fw_ver = (format_fw_ver_t)NULL, 11141 .format_fw_ver = (format_fw_ver_t)NULL,
11105 .hw_reset = (hw_reset_t)NULL, 11142 .hw_reset = (hw_reset_t)NULL,
11106 .set_link_led = (set_link_led_t)NULL, 11143 .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
11107 .phy_specific_func = (phy_specific_func_t)NULL 11144 .phy_specific_func = (phy_specific_func_t)NULL
11108}; 11145};
11109/*****************************************************************/ 11146/*****************************************************************/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6486ab8c8fc8..2f6361e949f0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10548,33 +10548,38 @@ do { \
10548 10548
10549int bnx2x_init_firmware(struct bnx2x *bp) 10549int bnx2x_init_firmware(struct bnx2x *bp)
10550{ 10550{
10551 const char *fw_file_name;
10552 struct bnx2x_fw_file_hdr *fw_hdr; 10551 struct bnx2x_fw_file_hdr *fw_hdr;
10553 int rc; 10552 int rc;
10554 10553
10555 if (CHIP_IS_E1(bp))
10556 fw_file_name = FW_FILE_NAME_E1;
10557 else if (CHIP_IS_E1H(bp))
10558 fw_file_name = FW_FILE_NAME_E1H;
10559 else if (!CHIP_IS_E1x(bp))
10560 fw_file_name = FW_FILE_NAME_E2;
10561 else {
10562 BNX2X_ERR("Unsupported chip revision\n");
10563 return -EINVAL;
10564 }
10565 10554
10566 BNX2X_DEV_INFO("Loading %s\n", fw_file_name); 10555 if (!bp->firmware) {
10556 const char *fw_file_name;
10567 10557
10568 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); 10558 if (CHIP_IS_E1(bp))
10569 if (rc) { 10559 fw_file_name = FW_FILE_NAME_E1;
10570 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name); 10560 else if (CHIP_IS_E1H(bp))
10571 goto request_firmware_exit; 10561 fw_file_name = FW_FILE_NAME_E1H;
10572 } 10562 else if (!CHIP_IS_E1x(bp))
10563 fw_file_name = FW_FILE_NAME_E2;
10564 else {
10565 BNX2X_ERR("Unsupported chip revision\n");
10566 return -EINVAL;
10567 }
10568 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
10573 10569
10574 rc = bnx2x_check_firmware(bp); 10570 rc = request_firmware(&bp->firmware, fw_file_name,
10575 if (rc) { 10571 &bp->pdev->dev);
10576 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); 10572 if (rc) {
10577 goto request_firmware_exit; 10573 BNX2X_ERR("Can't load firmware file %s\n",
10574 fw_file_name);
10575 goto request_firmware_exit;
10576 }
10577
10578 rc = bnx2x_check_firmware(bp);
10579 if (rc) {
10580 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
10581 goto request_firmware_exit;
10582 }
10578 } 10583 }
10579 10584
10580 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; 10585 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
@@ -10630,6 +10635,7 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
10630 kfree(bp->init_ops); 10635 kfree(bp->init_ops);
10631 kfree(bp->init_data); 10636 kfree(bp->init_data);
10632 release_firmware(bp->firmware); 10637 release_firmware(bp->firmware);
10638 bp->firmware = NULL;
10633} 10639}
10634 10640
10635 10641
@@ -10925,6 +10931,8 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10925 if (bp->doorbells) 10931 if (bp->doorbells)
10926 iounmap(bp->doorbells); 10932 iounmap(bp->doorbells);
10927 10933
10934 bnx2x_release_firmware(bp);
10935
10928 bnx2x_free_mem_bp(bp); 10936 bnx2x_free_mem_bp(bp);
10929 10937
10930 free_netdev(dev); 10938 free_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index fc7bd0f23c0b..e58073ef33b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -6990,6 +6990,7 @@ The other bits are reserved and should be zero*/
6990#define MDIO_REG_INTR_MASK 0x1b 6990#define MDIO_REG_INTR_MASK 0x1b
6991#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1) 6991#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
6992#define MDIO_REG_GPHY_SHADOW 0x1c 6992#define MDIO_REG_GPHY_SHADOW 0x1c
6993#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
6993#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10) 6994#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
6994#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15) 6995#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
6995#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10) 6996#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 0440425c83d6..14517691f8db 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -5380,7 +5380,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
5380 rc = drv->init_fw(bp); 5380 rc = drv->init_fw(bp);
5381 if (rc) { 5381 if (rc) {
5382 BNX2X_ERR("Error loading firmware\n"); 5382 BNX2X_ERR("Error loading firmware\n");
5383 goto fw_init_err; 5383 goto init_err;
5384 } 5384 }
5385 5385
5386 /* Handle the beginning of COMMON_XXX pases separatelly... */ 5386 /* Handle the beginning of COMMON_XXX pases separatelly... */
@@ -5388,25 +5388,25 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
5388 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 5388 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5389 rc = bnx2x_func_init_cmn_chip(bp, drv); 5389 rc = bnx2x_func_init_cmn_chip(bp, drv);
5390 if (rc) 5390 if (rc)
5391 goto init_hw_err; 5391 goto init_err;
5392 5392
5393 break; 5393 break;
5394 case FW_MSG_CODE_DRV_LOAD_COMMON: 5394 case FW_MSG_CODE_DRV_LOAD_COMMON:
5395 rc = bnx2x_func_init_cmn(bp, drv); 5395 rc = bnx2x_func_init_cmn(bp, drv);
5396 if (rc) 5396 if (rc)
5397 goto init_hw_err; 5397 goto init_err;
5398 5398
5399 break; 5399 break;
5400 case FW_MSG_CODE_DRV_LOAD_PORT: 5400 case FW_MSG_CODE_DRV_LOAD_PORT:
5401 rc = bnx2x_func_init_port(bp, drv); 5401 rc = bnx2x_func_init_port(bp, drv);
5402 if (rc) 5402 if (rc)
5403 goto init_hw_err; 5403 goto init_err;
5404 5404
5405 break; 5405 break;
5406 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 5406 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5407 rc = bnx2x_func_init_func(bp, drv); 5407 rc = bnx2x_func_init_func(bp, drv);
5408 if (rc) 5408 if (rc)
5409 goto init_hw_err; 5409 goto init_err;
5410 5410
5411 break; 5411 break;
5412 default: 5412 default:
@@ -5414,10 +5414,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
5414 rc = -EINVAL; 5414 rc = -EINVAL;
5415 } 5415 }
5416 5416
5417init_hw_err: 5417init_err:
5418 drv->release_fw(bp);
5419
5420fw_init_err:
5421 drv->gunzip_end(bp); 5418 drv->gunzip_end(bp);
5422 5419
5423 /* In case of success, complete the comand immediatelly: no ramrods 5420 /* In case of success, complete the comand immediatelly: no ramrods
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 98849a1fc749..b48378a41e49 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -7,6 +7,7 @@ config HAVE_NET_MACB
7 7
8config NET_ATMEL 8config NET_ATMEL
9 bool "Atmel devices" 9 bool "Atmel devices"
10 default y
10 depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200) 11 depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
11 ---help--- 12 ---help---
12 If you have a network (Ethernet) card belonging to this class, say Y. 13 If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 438f4580bf66..2a22f5256353 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -613,7 +613,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
613 613
614 if (!dm->wake_state) 614 if (!dm->wake_state)
615 irq_set_irq_wake(dm->irq_wake, 1); 615 irq_set_irq_wake(dm->irq_wake, 1);
616 else if (dm->wake_state & !opts) 616 else if (dm->wake_state && !opts)
617 irq_set_irq_wake(dm->irq_wake, 0); 617 irq_set_irq_wake(dm->irq_wake, 0);
618 } 618 }
619 619
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index c520cfd3b298..5272f9d4dda9 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -24,6 +24,7 @@ config FEC
24 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" 24 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
25 depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ 25 depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
26 ARCH_MXC || ARCH_MXS) 26 ARCH_MXC || ARCH_MXS)
27 default ARCH_MXC || ARCH_MXS if ARM
27 select PHYLIB 28 select PHYLIB
28 ---help--- 29 ---help---
29 Say Y here if you want to use the built-in 10/100 Fast ethernet 30 Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 1124ce0a1594..c136230d50bb 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -232,6 +232,7 @@ struct fec_enet_private {
232 struct platform_device *pdev; 232 struct platform_device *pdev;
233 233
234 int opened; 234 int opened;
235 int dev_id;
235 236
236 /* Phylib and MDIO interface */ 237 /* Phylib and MDIO interface */
237 struct mii_bus *mii_bus; 238 struct mii_bus *mii_bus;
@@ -837,7 +838,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
837 838
838 /* Adjust MAC if using macaddr */ 839 /* Adjust MAC if using macaddr */
839 if (iap == macaddr) 840 if (iap == macaddr)
840 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id; 841 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
841} 842}
842 843
843/* ------------------------------------------------------------------------- */ 844/* ------------------------------------------------------------------------- */
@@ -953,7 +954,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
953 char mdio_bus_id[MII_BUS_ID_SIZE]; 954 char mdio_bus_id[MII_BUS_ID_SIZE];
954 char phy_name[MII_BUS_ID_SIZE + 3]; 955 char phy_name[MII_BUS_ID_SIZE + 3];
955 int phy_id; 956 int phy_id;
956 int dev_id = fep->pdev->id; 957 int dev_id = fep->dev_id;
957 958
958 fep->phy_dev = NULL; 959 fep->phy_dev = NULL;
959 960
@@ -1031,7 +1032,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1031 * mdio interface in board design, and need to be configured by 1032 * mdio interface in board design, and need to be configured by
1032 * fec0 mii_bus. 1033 * fec0 mii_bus.
1033 */ 1034 */
1034 if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) { 1035 if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
1035 /* fec1 uses fec0 mii_bus */ 1036 /* fec1 uses fec0 mii_bus */
1036 fep->mii_bus = fec0_mii_bus; 1037 fep->mii_bus = fec0_mii_bus;
1037 return 0; 1038 return 0;
@@ -1063,7 +1064,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1063 fep->mii_bus->read = fec_enet_mdio_read; 1064 fep->mii_bus->read = fec_enet_mdio_read;
1064 fep->mii_bus->write = fec_enet_mdio_write; 1065 fep->mii_bus->write = fec_enet_mdio_write;
1065 fep->mii_bus->reset = fec_enet_mdio_reset; 1066 fep->mii_bus->reset = fec_enet_mdio_reset;
1066 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1); 1067 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
1067 fep->mii_bus->priv = fep; 1068 fep->mii_bus->priv = fep;
1068 fep->mii_bus->parent = &pdev->dev; 1069 fep->mii_bus->parent = &pdev->dev;
1069 1070
@@ -1521,6 +1522,7 @@ fec_probe(struct platform_device *pdev)
1521 int i, irq, ret = 0; 1522 int i, irq, ret = 0;
1522 struct resource *r; 1523 struct resource *r;
1523 const struct of_device_id *of_id; 1524 const struct of_device_id *of_id;
1525 static int dev_id;
1524 1526
1525 of_id = of_match_device(fec_dt_ids, &pdev->dev); 1527 of_id = of_match_device(fec_dt_ids, &pdev->dev);
1526 if (of_id) 1528 if (of_id)
@@ -1548,6 +1550,7 @@ fec_probe(struct platform_device *pdev)
1548 1550
1549 fep->hwp = ioremap(r->start, resource_size(r)); 1551 fep->hwp = ioremap(r->start, resource_size(r));
1550 fep->pdev = pdev; 1552 fep->pdev = pdev;
1553 fep->dev_id = dev_id++;
1551 1554
1552 if (!fep->hwp) { 1555 if (!fep->hwp) {
1553 ret = -ENOMEM; 1556 ret = -ENOMEM;
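
The fec.c change stops deriving the instance number from pdev->id (which can be -1 for device-tree probed devices) and instead hands out its own index from a static counter in probe. A hedged sketch of that pattern, assuming kernel context; my_priv and my_probe are illustrative names, not taken from fec.c:

    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct my_priv {
            struct platform_device *pdev;
            int dev_id;                     /* stable per-instance index */
    };

    static int my_probe(struct platform_device *pdev)
    {
            static int dev_id;              /* counts probed instances: 0, 1, ... */
            struct my_priv *priv;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            priv->pdev = pdev;
            priv->dev_id = dev_id++;        /* used instead of pdev->id */
            platform_set_drvdata(pdev, priv);
            return 0;
    }
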
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 52f4e8ad48e7..4d9f84b8ab97 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -183,28 +183,10 @@ void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
183} 183}
184EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name); 184EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
185 185
186/* Scan the bus in reverse, looking for an empty spot */
187static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
188{
189 int i;
190
191 for (i = PHY_MAX_ADDR; i > 0; i--) {
192 u32 phy_id;
193
194 if (get_phy_id(new_bus, i, &phy_id))
195 return -1;
196
197 if (phy_id == 0xffffffff)
198 break;
199 }
200
201 return i;
202}
203
204 186
205#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
206static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np) 187static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
207{ 188{
189#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
208 struct gfar __iomem *enet_regs; 190 struct gfar __iomem *enet_regs;
209 191
210 /* 192 /*
@@ -220,15 +202,15 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
220 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") || 202 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
221 of_device_is_compatible(np, "fsl,etsec2-tbi")) { 203 of_device_is_compatible(np, "fsl,etsec2-tbi")) {
222 return of_iomap(np, 1); 204 return of_iomap(np, 1);
223 } else 205 }
224 return NULL;
225}
226#endif 206#endif
207 return NULL;
208}
227 209
228 210
229#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
230static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) 211static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
231{ 212{
213#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
232 struct device_node *np = NULL; 214 struct device_node *np = NULL;
233 int err = 0; 215 int err = 0;
234 216
@@ -261,9 +243,10 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
261 return err; 243 return err;
262 else 244 else
263 return -EINVAL; 245 return -EINVAL;
264} 246#else
247 return -ENODEV;
265#endif 248#endif
266 249}
267 250
268static int fsl_pq_mdio_probe(struct platform_device *ofdev) 251static int fsl_pq_mdio_probe(struct platform_device *ofdev)
269{ 252{
@@ -339,19 +322,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
339 of_device_is_compatible(np, "fsl,etsec2-mdio") || 322 of_device_is_compatible(np, "fsl,etsec2-mdio") ||
340 of_device_is_compatible(np, "fsl,etsec2-tbi") || 323 of_device_is_compatible(np, "fsl,etsec2-tbi") ||
341 of_device_is_compatible(np, "gianfar")) { 324 of_device_is_compatible(np, "gianfar")) {
342#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
343 tbipa = get_gfar_tbipa(regs, np); 325 tbipa = get_gfar_tbipa(regs, np);
344 if (!tbipa) { 326 if (!tbipa) {
345 err = -EINVAL; 327 err = -EINVAL;
346 goto err_free_irqs; 328 goto err_free_irqs;
347 } 329 }
348#else
349 err = -ENODEV;
350 goto err_free_irqs;
351#endif
352 } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || 330 } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
353 of_device_is_compatible(np, "ucc_geth_phy")) { 331 of_device_is_compatible(np, "ucc_geth_phy")) {
354#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
355 u32 id; 332 u32 id;
356 static u32 mii_mng_master; 333 static u32 mii_mng_master;
357 334
@@ -364,10 +341,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
364 mii_mng_master = id; 341 mii_mng_master = id;
365 ucc_set_qe_mux_mii_mng(id - 1); 342 ucc_set_qe_mux_mii_mng(id - 1);
366 } 343 }
367#else
368 err = -ENODEV;
369 goto err_free_irqs;
370#endif
371 } else { 344 } else {
372 err = -ENODEV; 345 err = -ENODEV;
373 goto err_free_irqs; 346 goto err_free_irqs;
@@ -386,16 +359,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
386 } 359 }
387 360
388 if (tbiaddr == -1) { 361 if (tbiaddr == -1) {
389 out_be32(tbipa, 0);
390
391 tbiaddr = fsl_pq_mdio_find_free(new_bus);
392 }
393
394 /*
395 * We define TBIPA at 0 to be illegal, opting to fail for boards that
396 * have PHYs at 1-31, rather than change tbipa and rescan.
397 */
398 if (tbiaddr == 0) {
399 err = -EBUSY; 362 err = -EBUSY;
400 363
401 goto err_free_irqs; 364 goto err_free_irqs;
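
The fsl_pq_mdio rework moves the config #ifdefs inside the helper bodies, so the helpers always exist and the call sites no longer need their own #ifdef/#else blocks; when the option is off, the helper simply reports "not available". A small standalone sketch of the pattern, with CONFIG_FOO and get_foo_regs() as made-up names:

    #include <stdio.h>

    /* Define this to simulate the Kconfig option being enabled. */
    /* #define CONFIG_FOO 1 */

    static void *get_foo_regs(void)
    {
    #ifdef CONFIG_FOO
            static int fake_regs;           /* stand-in for an ioremapped region */
            return &fake_regs;
    #else
            return NULL;                    /* option off: caller maps this to -ENODEV */
    #endif
    }

    int main(void)
    {
            void *regs = get_foo_regs();

            if (!regs) {
                    printf("foo support not built in (error path)\n");
                    return 1;
            }
            printf("foo regs found\n");
            return 0;
    }
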
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 410d6a1984ed..6650068c996c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -61,9 +61,9 @@
61#ifdef EHEA_SMALL_QUEUES 61#ifdef EHEA_SMALL_QUEUES
62#define EHEA_MAX_CQE_COUNT 1023 62#define EHEA_MAX_CQE_COUNT 1023
63#define EHEA_DEF_ENTRIES_SQ 1023 63#define EHEA_DEF_ENTRIES_SQ 1023
64#define EHEA_DEF_ENTRIES_RQ1 4095 64#define EHEA_DEF_ENTRIES_RQ1 1023
65#define EHEA_DEF_ENTRIES_RQ2 1023 65#define EHEA_DEF_ENTRIES_RQ2 1023
66#define EHEA_DEF_ENTRIES_RQ3 1023 66#define EHEA_DEF_ENTRIES_RQ3 511
67#else 67#else
68#define EHEA_MAX_CQE_COUNT 4080 68#define EHEA_MAX_CQE_COUNT 4080
69#define EHEA_DEF_ENTRIES_SQ 4080 69#define EHEA_DEF_ENTRIES_SQ 4080
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 37b70f7052b6..bfeccbfde236 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work)
371out_herr: 371out_herr:
372 free_page((unsigned long)cb2); 372 free_page((unsigned long)cb2);
373resched: 373resched:
374 schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000)); 374 schedule_delayed_work(&port->stats_work,
375 round_jiffies_relative(msecs_to_jiffies(1000)));
375} 376}
376 377
377static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) 378static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -2434,7 +2435,8 @@ static int ehea_open(struct net_device *dev)
2434 } 2435 }
2435 2436
2436 mutex_unlock(&port->port_lock); 2437 mutex_unlock(&port->port_lock);
2437 schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000)); 2438 schedule_delayed_work(&port->stats_work,
2439 round_jiffies_relative(msecs_to_jiffies(1000)));
2438 2440
2439 return ret; 2441 return ret;
2440} 2442}
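
Both ehea call sites now wrap the one-second delay in round_jiffies_relative(), so the periodic stats work fires on a rounded jiffy boundary and can coalesce with other timers. A hedged kernel-context sketch of the rearming pattern; my_port and my_stats_work are illustrative names:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_port {
            struct delayed_work stats_work;
    };

    static void my_stats_work(struct work_struct *work)
    {
            struct my_port *port = container_of(work, struct my_port,
                                                stats_work.work);

            /* ... collect statistics for port ... */

            /* Re-arm roughly every second; rounding the delay lets the timer
             * batch with other wakeups instead of firing at an arbitrary offset. */
            schedule_delayed_work(&port->stats_work,
                                  round_jiffies_relative(msecs_to_jiffies(1000)));
    }
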
diff --git a/drivers/net/ethernet/ibm/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c
index 4326681df382..acc31af6594a 100644
--- a/drivers/net/ethernet/ibm/iseries_veth.c
+++ b/drivers/net/ethernet/ibm/iseries_veth.c
@@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
1421 1421
1422 /* FIXME: do we need this? */ 1422 /* FIXME: do we need this? */
1423 memset(local_list, 0, sizeof(local_list)); 1423 memset(local_list, 0, sizeof(local_list));
1424 memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG)); 1424 memset(remote_list, 0, sizeof(remote_list));
1425 1425
1426 /* a 0 address marks the end of the valid entries */ 1426 /* a 0 address marks the end of the valid entries */
1427 if (senddata->addr[startchunk] == 0) 1427 if (senddata->addr[startchunk] == 0)
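
The iseries_veth fix replaces sizeof(VETH_MAX_FRAMES_PER_MSG), which is just the size of an integer constant, with sizeof(remote_list), the size of the whole array. A tiny userspace illustration with a hypothetical array:

    #include <stdio.h>
    #include <string.h>

    #define MAX_FRAMES 6    /* hypothetical stand-in for VETH_MAX_FRAMES_PER_MSG */

    int main(void)
    {
            unsigned long remote_list[MAX_FRAMES];

            /* Buggy: this is sizeof(int), so memset cleared only a few bytes. */
            printf("sizeof(MAX_FRAMES)  = %zu\n", sizeof(MAX_FRAMES));

            /* Fixed: the full array, MAX_FRAMES * sizeof(unsigned long) bytes. */
            printf("sizeof(remote_list) = %zu\n", sizeof(remote_list));

            memset(remote_list, 0, sizeof(remote_list));
            return 0;
    }
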
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 7becff1f387d..76b84573566b 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1745,6 +1745,112 @@ jme_phy_off(struct jme_adapter *jme)
1745} 1745}
1746 1746
1747static int 1747static int
1748jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
1749{
1750 u32 phy_addr;
1751
1752 phy_addr = JM_PHY_SPEC_REG_READ | specreg;
1753 jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
1754 phy_addr);
1755 return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
1756 JM_PHY_SPEC_DATA_REG);
1757}
1758
1759static void
1760jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
1761{
1762 u32 phy_addr;
1763
1764 phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
1765 jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
1766 phy_data);
1767 jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
1768 phy_addr);
1769}
1770
1771static int
1772jme_phy_calibration(struct jme_adapter *jme)
1773{
1774 u32 ctrl1000, phy_data;
1775
1776 jme_phy_off(jme);
1777 jme_phy_on(jme);
1778 /* Enabel PHY test mode 1 */
1779 ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
1780 ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
1781 ctrl1000 |= PHY_GAD_TEST_MODE_1;
1782 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
1783
1784 phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
1785 phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
1786 phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
1787 JM_PHY_EXT_COMM_2_CALI_ENABLE;
1788 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
1789 msleep(20);
1790 phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
1791 phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
1792 JM_PHY_EXT_COMM_2_CALI_MODE_0 |
1793 JM_PHY_EXT_COMM_2_CALI_LATCH);
1794 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
1795
1796 /* Disable PHY test mode */
1797 ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
1798 ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
1799 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
1800 return 0;
1801}
1802
1803static int
1804jme_phy_setEA(struct jme_adapter *jme)
1805{
1806 u32 phy_comm0 = 0, phy_comm1 = 0;
1807 u8 nic_ctrl;
1808
1809 pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
1810 if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
1811 return 0;
1812
1813 switch (jme->pdev->device) {
1814 case PCI_DEVICE_ID_JMICRON_JMC250:
1815 if (((jme->chip_main_rev == 5) &&
1816 ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
1817 (jme->chip_sub_rev == 3))) ||
1818 (jme->chip_main_rev >= 6)) {
1819 phy_comm0 = 0x008A;
1820 phy_comm1 = 0x4109;
1821 }
1822 if ((jme->chip_main_rev == 3) &&
1823 ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
1824 phy_comm0 = 0xE088;
1825 break;
1826 case PCI_DEVICE_ID_JMICRON_JMC260:
1827 if (((jme->chip_main_rev == 5) &&
1828 ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
1829 (jme->chip_sub_rev == 3))) ||
1830 (jme->chip_main_rev >= 6)) {
1831 phy_comm0 = 0x008A;
1832 phy_comm1 = 0x4109;
1833 }
1834 if ((jme->chip_main_rev == 3) &&
1835 ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
1836 phy_comm0 = 0xE088;
1837 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
1838 phy_comm0 = 0x608A;
1839 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
1840 phy_comm0 = 0x408A;
1841 break;
1842 default:
1843 return -ENODEV;
1844 }
1845 if (phy_comm0)
1846 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
1847 if (phy_comm1)
1848 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
1849
1850 return 0;
1851}
1852
1853static int
1748jme_open(struct net_device *netdev) 1854jme_open(struct net_device *netdev)
1749{ 1855{
1750 struct jme_adapter *jme = netdev_priv(netdev); 1856 struct jme_adapter *jme = netdev_priv(netdev);
@@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev)
1769 jme_set_settings(netdev, &jme->old_ecmd); 1875 jme_set_settings(netdev, &jme->old_ecmd);
1770 else 1876 else
1771 jme_reset_phy_processor(jme); 1877 jme_reset_phy_processor(jme);
1772 1878 jme_phy_calibration(jme);
1879 jme_phy_setEA(jme);
1773 jme_reset_link(jme); 1880 jme_reset_link(jme);
1774 1881
1775 return 0; 1882 return 0;
@@ -3184,7 +3291,8 @@ jme_resume(struct device *dev)
3184 jme_set_settings(netdev, &jme->old_ecmd); 3291 jme_set_settings(netdev, &jme->old_ecmd);
3185 else 3292 else
3186 jme_reset_phy_processor(jme); 3293 jme_reset_phy_processor(jme);
3187 3294 jme_phy_calibration(jme);
3295 jme_phy_setEA(jme);
3188 jme_start_irq(jme); 3296 jme_start_irq(jme);
3189 netif_device_attach(netdev); 3297 netif_device_attach(netdev);
3190 3298
@@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
3239MODULE_LICENSE("GPL"); 3347MODULE_LICENSE("GPL");
3240MODULE_VERSION(DRV_VERSION); 3348MODULE_VERSION(DRV_VERSION);
3241MODULE_DEVICE_TABLE(pci, jme_pci_tbl); 3349MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
3242
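
The new jme helpers reach vendor-specific PHY registers indirectly: a read/write opcode plus register index goes into a spec-address register, and the payload moves through a spec-data register. A rough userspace model of that address/data indirection; the register file below is simulated, not real hardware, and the constants only mirror the driver's defines:

    #include <stdio.h>
    #include <stdint.h>

    #define SPEC_REG_READ   0x4000
    #define SPEC_REG_WRITE  0x8000

    /* Toy PHY model: an address latch, a data register, and a bank of
     * extended registers reachable only through them. */
    static uint32_t ext_bank[0x40];
    static uint32_t addr_latch, data_reg;

    static void phy_write_addr(uint32_t v)
    {
            addr_latch = v;
            if (addr_latch & SPEC_REG_WRITE)        /* write op: commit data */
                    ext_bank[addr_latch & 0x3f] = data_reg;
            else if (addr_latch & SPEC_REG_READ)    /* read op: latch data */
                    data_reg = ext_bank[addr_latch & 0x3f];
    }

    static void specreg_write(uint32_t reg, uint32_t val)
    {
            data_reg = val;                         /* data first ...        */
            phy_write_addr(SPEC_REG_WRITE | reg);   /* ... then addr+opcode  */
    }

    static uint32_t specreg_read(uint32_t reg)
    {
            phy_write_addr(SPEC_REG_READ | reg);    /* addr+opcode first ... */
            return data_reg;                        /* ... then read data    */
    }

    int main(void)
    {
            specreg_write(0x32, 0x11);              /* e.g. an EXT_COMM register */
            printf("ext reg 0x32 = 0x%02x\n", specreg_read(0x32));
            return 0;
    }
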
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 02ea27c1dcb5..4304072bd3c5 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -760,6 +760,25 @@ enum jme_rxmcs_bits {
760 RXMCS_CHECKSUM, 760 RXMCS_CHECKSUM,
761}; 761};
762 762
763/* Extern PHY common register 2 */
764
765#define PHY_GAD_TEST_MODE_1 0x00002000
766#define PHY_GAD_TEST_MODE_MSK 0x0000E000
767#define JM_PHY_SPEC_REG_READ 0x00004000
768#define JM_PHY_SPEC_REG_WRITE 0x00008000
769#define PHY_CALIBRATION_DELAY 20
770#define JM_PHY_SPEC_ADDR_REG 0x1E
771#define JM_PHY_SPEC_DATA_REG 0x1F
772
773#define JM_PHY_EXT_COMM_0_REG 0x30
774#define JM_PHY_EXT_COMM_1_REG 0x31
775#define JM_PHY_EXT_COMM_2_REG 0x32
776#define JM_PHY_EXT_COMM_2_CALI_ENABLE 0x01
777#define JM_PHY_EXT_COMM_2_CALI_MODE_0 0x02
778#define JM_PHY_EXT_COMM_2_CALI_LATCH 0x10
779#define PCI_PRIV_SHARE_NICCTRL 0xF5
780#define JME_FLAG_PHYEA_ENABLE 0x2
781
763/* 782/*
764 * Wakeup Frame setup interface registers 783 * Wakeup Frame setup interface registers
765 */ 784 */
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6bb2b9506cad..0b3567ab8121 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -34,6 +34,8 @@
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/dma-mapping.h>
38#include <linux/module.h>
37 39
38#include <asm/checksum.h> 40#include <asm/checksum.h>
39 41
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index fdc6c394c683..7803efa46eb2 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -50,7 +50,7 @@
50#include "sky2.h" 50#include "sky2.h"
51 51
52#define DRV_NAME "sky2" 52#define DRV_NAME "sky2"
53#define DRV_VERSION "1.29" 53#define DRV_VERSION "1.30"
54 54
55/* 55/*
56 * The Yukon II chipset takes 64 bit command blocks (called list elements) 56 * The Yukon II chipset takes 64 bit command blocks (called list elements)
@@ -68,7 +68,7 @@
68#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1)) 68#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
69#define TX_MIN_PENDING (MAX_SKB_TX_LE+1) 69#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
70#define TX_MAX_PENDING 1024 70#define TX_MAX_PENDING 1024
71#define TX_DEF_PENDING 127 71#define TX_DEF_PENDING 63
72 72
73#define TX_WATCHDOG (5 * HZ) 73#define TX_WATCHDOG (5 * HZ)
74#define NAPI_WEIGHT 64 74#define NAPI_WEIGHT 64
@@ -869,6 +869,7 @@ static void sky2_wol_init(struct sky2_port *sky2)
869 869
870 /* block receiver */ 870 /* block receiver */
871 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 871 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
872 sky2_read32(hw, B0_CTST);
872} 873}
873 874
874static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) 875static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
@@ -1274,6 +1275,14 @@ static void rx_set_checksum(struct sky2_port *sky2)
1274 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 1275 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
1275} 1276}
1276 1277
1278/*
1279 * Fixed initial key as seed to RSS.
1280 */
1281static const uint32_t rss_init_key[10] = {
1282 0x7c3351da, 0x51c5cf4e, 0x44adbdd1, 0xe8d38d18, 0x48897c43,
1283 0xb1d60e7e, 0x6a3dd760, 0x01a2e453, 0x16f46f13, 0x1a0e7b30
1284};
1285
1277/* Enable/disable receive hash calculation (RSS) */ 1286/* Enable/disable receive hash calculation (RSS) */
1278static void rx_set_rss(struct net_device *dev, u32 features) 1287static void rx_set_rss(struct net_device *dev, u32 features)
1279{ 1288{
@@ -1289,12 +1298,9 @@ static void rx_set_rss(struct net_device *dev, u32 features)
1289 1298
1290 /* Program RSS initial values */ 1299 /* Program RSS initial values */
1291 if (features & NETIF_F_RXHASH) { 1300 if (features & NETIF_F_RXHASH) {
1292 u32 key[nkeys];
1293
1294 get_random_bytes(key, nkeys * sizeof(u32));
1295 for (i = 0; i < nkeys; i++) 1301 for (i = 0; i < nkeys; i++)
1296 sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4), 1302 sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
1297 key[i]); 1303 rss_init_key[i]);
1298 1304
1299 /* Need to turn on (undocumented) flag to make hashing work */ 1305 /* Need to turn on (undocumented) flag to make hashing work */
1300 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), 1306 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
@@ -1717,6 +1723,8 @@ static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
1717 if (err) 1723 if (err)
1718 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); 1724 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
1719 else { 1725 else {
1726 hw->flags |= SKY2_HW_IRQ_SETUP;
1727
1720 napi_enable(&hw->napi); 1728 napi_enable(&hw->napi);
1721 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 1729 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
1722 sky2_read32(hw, B0_IMSK); 1730 sky2_read32(hw, B0_IMSK);
@@ -1727,7 +1735,7 @@ static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
1727 1735
1728 1736
1729/* Bring up network interface. */ 1737/* Bring up network interface. */
1730static int sky2_up(struct net_device *dev) 1738static int sky2_open(struct net_device *dev)
1731{ 1739{
1732 struct sky2_port *sky2 = netdev_priv(dev); 1740 struct sky2_port *sky2 = netdev_priv(dev);
1733 struct sky2_hw *hw = sky2->hw; 1741 struct sky2_hw *hw = sky2->hw;
@@ -1747,6 +1755,11 @@ static int sky2_up(struct net_device *dev)
1747 1755
1748 sky2_hw_up(sky2); 1756 sky2_hw_up(sky2);
1749 1757
1758 if (hw->chip_id == CHIP_ID_YUKON_OPT ||
1759 hw->chip_id == CHIP_ID_YUKON_PRM ||
1760 hw->chip_id == CHIP_ID_YUKON_OP_2)
1761 imask |= Y2_IS_PHY_QLNK; /* enable PHY Quick Link */
1762
1750 /* Enable interrupts from phy/mac for port */ 1763 /* Enable interrupts from phy/mac for port */
1751 imask = sky2_read32(hw, B0_IMSK); 1764 imask = sky2_read32(hw, B0_IMSK);
1752 imask |= portirq_msk[port]; 1765 imask |= portirq_msk[port];
@@ -2040,6 +2053,8 @@ static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
2040 2053
2041 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); 2054 sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
2042 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 2055 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
2056
2057 sky2_read32(hw, B0_CTST);
2043} 2058}
2044 2059
2045static void sky2_hw_down(struct sky2_port *sky2) 2060static void sky2_hw_down(struct sky2_port *sky2)
@@ -2090,7 +2105,7 @@ static void sky2_hw_down(struct sky2_port *sky2)
2090} 2105}
2091 2106
2092/* Network shutdown */ 2107/* Network shutdown */
2093static int sky2_down(struct net_device *dev) 2108static int sky2_close(struct net_device *dev)
2094{ 2109{
2095 struct sky2_port *sky2 = netdev_priv(dev); 2110 struct sky2_port *sky2 = netdev_priv(dev);
2096 struct sky2_hw *hw = sky2->hw; 2111 struct sky2_hw *hw = sky2->hw;
@@ -2101,15 +2116,22 @@ static int sky2_down(struct net_device *dev)
2101 2116
2102 netif_info(sky2, ifdown, dev, "disabling interface\n"); 2117 netif_info(sky2, ifdown, dev, "disabling interface\n");
2103 2118
2104 /* Disable port IRQ */
2105 sky2_write32(hw, B0_IMSK,
2106 sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
2107 sky2_read32(hw, B0_IMSK);
2108
2109 if (hw->ports == 1) { 2119 if (hw->ports == 1) {
2120 sky2_write32(hw, B0_IMSK, 0);
2121 sky2_read32(hw, B0_IMSK);
2122
2110 napi_disable(&hw->napi); 2123 napi_disable(&hw->napi);
2111 free_irq(hw->pdev->irq, hw); 2124 free_irq(hw->pdev->irq, hw);
2125 hw->flags &= ~SKY2_HW_IRQ_SETUP;
2112 } else { 2126 } else {
2127 u32 imask;
2128
2129 /* Disable port IRQ */
2130 imask = sky2_read32(hw, B0_IMSK);
2131 imask &= ~portirq_msk[sky2->port];
2132 sky2_write32(hw, B0_IMSK, imask);
2133 sky2_read32(hw, B0_IMSK);
2134
2113 synchronize_irq(hw->pdev->irq); 2135 synchronize_irq(hw->pdev->irq);
2114 napi_synchronize(&hw->napi); 2136 napi_synchronize(&hw->napi);
2115 } 2137 }
@@ -2587,7 +2609,7 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
2587 if (netif_running(dev)) { 2609 if (netif_running(dev)) {
2588 sky2_tx_complete(sky2, last); 2610 sky2_tx_complete(sky2, last);
2589 2611
2590 /* Wake unless it's detached, and called e.g. from sky2_down() */ 2612 /* Wake unless it's detached, and called e.g. from sky2_close() */
2591 if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) 2613 if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
2592 netif_wake_queue(dev); 2614 netif_wake_queue(dev);
2593 } 2615 }
@@ -3258,7 +3280,6 @@ static void sky2_reset(struct sky2_hw *hw)
3258 hw->chip_id == CHIP_ID_YUKON_PRM || 3280 hw->chip_id == CHIP_ID_YUKON_PRM ||
3259 hw->chip_id == CHIP_ID_YUKON_OP_2) { 3281 hw->chip_id == CHIP_ID_YUKON_OP_2) {
3260 u16 reg; 3282 u16 reg;
3261 u32 msk;
3262 3283
3263 if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { 3284 if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
3264 /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */ 3285 /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */
@@ -3281,11 +3302,6 @@ static void sky2_reset(struct sky2_hw *hw)
3281 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3302 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3282 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); 3303 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
3283 3304
3284 /* enable PHY Quick Link */
3285 msk = sky2_read32(hw, B0_IMSK);
3286 msk |= Y2_IS_PHY_QLNK;
3287 sky2_write32(hw, B0_IMSK, msk);
3288
3289 /* check if PSMv2 was running before */ 3305 /* check if PSMv2 was running before */
3290 reg = sky2_pci_read16(hw, PSM_CONFIG_REG3); 3306 reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
3291 if (reg & PCI_EXP_LNKCTL_ASPMC) 3307 if (reg & PCI_EXP_LNKCTL_ASPMC)
@@ -3383,7 +3399,7 @@ static void sky2_detach(struct net_device *dev)
3383 netif_tx_lock(dev); 3399 netif_tx_lock(dev);
3384 netif_device_detach(dev); /* stop txq */ 3400 netif_device_detach(dev); /* stop txq */
3385 netif_tx_unlock(dev); 3401 netif_tx_unlock(dev);
3386 sky2_down(dev); 3402 sky2_close(dev);
3387 } 3403 }
3388} 3404}
3389 3405
@@ -3393,7 +3409,7 @@ static int sky2_reattach(struct net_device *dev)
3393 int err = 0; 3409 int err = 0;
3394 3410
3395 if (netif_running(dev)) { 3411 if (netif_running(dev)) {
3396 err = sky2_up(dev); 3412 err = sky2_open(dev);
3397 if (err) { 3413 if (err) {
3398 netdev_info(dev, "could not restart %d\n", err); 3414 netdev_info(dev, "could not restart %d\n", err);
3399 dev_close(dev); 3415 dev_close(dev);
@@ -3410,10 +3426,13 @@ static void sky2_all_down(struct sky2_hw *hw)
3410{ 3426{
3411 int i; 3427 int i;
3412 3428
3413 sky2_read32(hw, B0_IMSK); 3429 if (hw->flags & SKY2_HW_IRQ_SETUP) {
3414 sky2_write32(hw, B0_IMSK, 0); 3430 sky2_read32(hw, B0_IMSK);
3415 synchronize_irq(hw->pdev->irq); 3431 sky2_write32(hw, B0_IMSK, 0);
3416 napi_disable(&hw->napi); 3432
3433 synchronize_irq(hw->pdev->irq);
3434 napi_disable(&hw->napi);
3435 }
3417 3436
3418 for (i = 0; i < hw->ports; i++) { 3437 for (i = 0; i < hw->ports; i++) {
3419 struct net_device *dev = hw->dev[i]; 3438 struct net_device *dev = hw->dev[i];
@@ -3446,11 +3465,12 @@ static void sky2_all_up(struct sky2_hw *hw)
3446 netif_wake_queue(dev); 3465 netif_wake_queue(dev);
3447 } 3466 }
3448 3467
3449 sky2_write32(hw, B0_IMSK, imask); 3468 if (hw->flags & SKY2_HW_IRQ_SETUP) {
3450 sky2_read32(hw, B0_IMSK); 3469 sky2_write32(hw, B0_IMSK, imask);
3451 3470 sky2_read32(hw, B0_IMSK);
3452 sky2_read32(hw, B0_Y2_SP_LISR); 3471 sky2_read32(hw, B0_Y2_SP_LISR);
3453 napi_enable(&hw->napi); 3472 napi_enable(&hw->napi);
3473 }
3454} 3474}
3455 3475
3456static void sky2_restart(struct work_struct *work) 3476static void sky2_restart(struct work_struct *work)
@@ -4071,6 +4091,16 @@ static int sky2_set_coalesce(struct net_device *dev,
4071 return 0; 4091 return 0;
4072} 4092}
4073 4093
4094/*
4095 * Hardware is limited to min of 128 and max of 2048 for ring size
4096 * and rounded up to next power of two
4097 * to avoid division in modulus calclation
4098 */
4099static unsigned long roundup_ring_size(unsigned long pending)
4100{
4101 return max(128ul, roundup_pow_of_two(pending+1));
4102}
4103
4074static void sky2_get_ringparam(struct net_device *dev, 4104static void sky2_get_ringparam(struct net_device *dev,
4075 struct ethtool_ringparam *ering) 4105 struct ethtool_ringparam *ering)
4076{ 4106{
@@ -4098,7 +4128,7 @@ static int sky2_set_ringparam(struct net_device *dev,
4098 4128
4099 sky2->rx_pending = ering->rx_pending; 4129 sky2->rx_pending = ering->rx_pending;
4100 sky2->tx_pending = ering->tx_pending; 4130 sky2->tx_pending = ering->tx_pending;
4101 sky2->tx_ring_size = roundup_pow_of_two(sky2->tx_pending+1); 4131 sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending);
4102 4132
4103 return sky2_reattach(dev); 4133 return sky2_reattach(dev);
4104} 4134}
@@ -4556,7 +4586,7 @@ static int sky2_device_event(struct notifier_block *unused,
4556 struct net_device *dev = ptr; 4586 struct net_device *dev = ptr;
4557 struct sky2_port *sky2 = netdev_priv(dev); 4587 struct sky2_port *sky2 = netdev_priv(dev);
4558 4588
4559 if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug) 4589 if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug)
4560 return NOTIFY_DONE; 4590 return NOTIFY_DONE;
4561 4591
4562 switch (event) { 4592 switch (event) {
@@ -4621,8 +4651,8 @@ static __exit void sky2_debug_cleanup(void)
4621 not allowing netpoll on second port */ 4651 not allowing netpoll on second port */
4622static const struct net_device_ops sky2_netdev_ops[2] = { 4652static const struct net_device_ops sky2_netdev_ops[2] = {
4623 { 4653 {
4624 .ndo_open = sky2_up, 4654 .ndo_open = sky2_open,
4625 .ndo_stop = sky2_down, 4655 .ndo_stop = sky2_close,
4626 .ndo_start_xmit = sky2_xmit_frame, 4656 .ndo_start_xmit = sky2_xmit_frame,
4627 .ndo_do_ioctl = sky2_ioctl, 4657 .ndo_do_ioctl = sky2_ioctl,
4628 .ndo_validate_addr = eth_validate_addr, 4658 .ndo_validate_addr = eth_validate_addr,
@@ -4638,8 +4668,8 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
4638#endif 4668#endif
4639 }, 4669 },
4640 { 4670 {
4641 .ndo_open = sky2_up, 4671 .ndo_open = sky2_open,
4642 .ndo_stop = sky2_down, 4672 .ndo_stop = sky2_close,
4643 .ndo_start_xmit = sky2_xmit_frame, 4673 .ndo_start_xmit = sky2_xmit_frame,
4644 .ndo_do_ioctl = sky2_ioctl, 4674 .ndo_do_ioctl = sky2_ioctl,
4645 .ndo_validate_addr = eth_validate_addr, 4675 .ndo_validate_addr = eth_validate_addr,
@@ -4692,7 +4722,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4692 spin_lock_init(&sky2->phy_lock); 4722 spin_lock_init(&sky2->phy_lock);
4693 4723
4694 sky2->tx_pending = TX_DEF_PENDING; 4724 sky2->tx_pending = TX_DEF_PENDING;
4695 sky2->tx_ring_size = roundup_pow_of_two(TX_DEF_PENDING+1); 4725 sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING);
4696 sky2->rx_pending = RX_DEF_PENDING; 4726 sky2->rx_pending = RX_DEF_PENDING;
4697 4727
4698 hw->dev[port] = dev; 4728 hw->dev[port] = dev;
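
sky2 now centralizes TX ring sizing in roundup_ring_size(): the requested pending count is rounded up to a power of two (so ring indices can wrap with a mask rather than a modulus) and clamped to the hardware minimum of 128. A small userspace sketch of the same arithmetic; roundup_pow2() below is a local helper standing in for the kernel's roundup_pow_of_two():

    #include <stdio.h>

    static unsigned long roundup_pow2(unsigned long v)
    {
            unsigned long p = 1;

            while (p < v)
                    p <<= 1;
            return p;
    }

    static unsigned long roundup_ring_size(unsigned long pending)
    {
            unsigned long size = roundup_pow2(pending + 1);

            return size < 128 ? 128 : size;     /* hardware minimum of 128 entries */
    }

    int main(void)
    {
            printf("pending   63 -> ring %lu\n", roundup_ring_size(63));    /* 128 */
            printf("pending  127 -> ring %lu\n", roundup_ring_size(127));   /* 128 */
            printf("pending 1024 -> ring %lu\n", roundup_ring_size(1024));  /* 2048 */
            return 0;
    }
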
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 0af31b8b5f10..ff6f58bf822a 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2287,6 +2287,7 @@ struct sky2_hw {
2287#define SKY2_HW_RSS_BROKEN 0x00000100 2287#define SKY2_HW_RSS_BROKEN 0x00000100
2288#define SKY2_HW_VLAN_BROKEN 0x00000200 2288#define SKY2_HW_VLAN_BROKEN 0x00000200
2289#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */ 2289#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */
2290#define SKY2_HW_IRQ_SETUP 0x00000800
2290 2291
2291 u8 chip_id; 2292 u8 chip_id;
2292 u8 chip_rev; 2293 u8 chip_rev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b89c36dbf5b3..c2df6c358603 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -581,6 +581,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
581 * Packet is OK - process it. 581 * Packet is OK - process it.
582 */ 582 */
583 length = be32_to_cpu(cqe->byte_cnt); 583 length = be32_to_cpu(cqe->byte_cnt);
584 length -= ring->fcs_del;
584 ring->bytes += length; 585 ring->bytes += length;
585 ring->packets++; 586 ring->packets++;
586 587
@@ -813,8 +814,11 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
813 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); 814 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
814 815
815 /* Cancel FCS removal if FW allows */ 816 /* Cancel FCS removal if FW allows */
816 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) 817 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
817 context->param3 |= cpu_to_be32(1 << 29); 818 context->param3 |= cpu_to_be32(1 << 29);
819 ring->fcs_del = ETH_FCS_LEN;
820 } else
821 ring->fcs_del = 0;
818 822
819 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state); 823 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
820 if (err) { 824 if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8fda331c65df..207b5add3ca8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -272,6 +272,7 @@ struct mlx4_en_rx_ring {
272 u32 prod; 272 u32 prod;
273 u32 cons; 273 u32 cons;
274 u32 buf_size; 274 u32 buf_size;
275 u8 fcs_del;
275 void *buf; 276 void *buf;
276 void *rx_info; 277 void *rx_info;
277 unsigned long bytes; 278 unsigned long bytes;
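
The mlx4_en change records per RX ring whether the firmware was told to keep the Ethernet FCS, then subtracts that many bytes (ETH_FCS_LEN, i.e. 4) from each completion's byte count so the stack never sees the trailing CRC. A condensed kernel-context sketch of the two halves; the struct and function names are illustrative, only loosely following the patch:

    #include <linux/types.h>
    #include <linux/if_ether.h>     /* ETH_FCS_LEN == 4 */

    struct rx_ring_cfg {
            u8 fcs_del;             /* bytes of FCS the HW leaves on each frame */
    };

    static void ring_configure(struct rx_ring_cfg *ring, bool hw_keeps_fcs)
    {
            /* If FCS stripping was cancelled in the QP context, remember that
             * every received frame now carries the extra CRC bytes. */
            ring->fcs_del = hw_keeps_fcs ? ETH_FCS_LEN : 0;
    }

    static u32 ring_frame_len(struct rx_ring_cfg *ring, u32 cqe_byte_cnt)
    {
            return cqe_byte_cnt - ring->fcs_del;   /* length handed to the stack */
    }
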
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1dca57013cb2..1c61d36e6570 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -609,7 +609,7 @@ struct nv_ethtool_str {
609}; 609};
610 610
611static const struct nv_ethtool_str nv_estats_str[] = { 611static const struct nv_ethtool_str nv_estats_str[] = {
612 { "tx_bytes" }, 612 { "tx_bytes" }, /* includes Ethernet FCS CRC */
613 { "tx_zero_rexmt" }, 613 { "tx_zero_rexmt" },
614 { "tx_one_rexmt" }, 614 { "tx_one_rexmt" },
615 { "tx_many_rexmt" }, 615 { "tx_many_rexmt" },
@@ -637,7 +637,7 @@ static const struct nv_ethtool_str nv_estats_str[] = {
637 /* version 2 stats */ 637 /* version 2 stats */
638 { "tx_deferral" }, 638 { "tx_deferral" },
639 { "tx_packets" }, 639 { "tx_packets" },
640 { "rx_bytes" }, 640 { "rx_bytes" }, /* includes Ethernet FCS CRC */
641 { "tx_pause" }, 641 { "tx_pause" },
642 { "rx_pause" }, 642 { "rx_pause" },
643 { "rx_drop_frame" }, 643 { "rx_drop_frame" },
@@ -649,7 +649,7 @@ static const struct nv_ethtool_str nv_estats_str[] = {
649}; 649};
650 650
651struct nv_ethtool_stats { 651struct nv_ethtool_stats {
652 u64 tx_bytes; 652 u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
653 u64 tx_zero_rexmt; 653 u64 tx_zero_rexmt;
654 u64 tx_one_rexmt; 654 u64 tx_one_rexmt;
655 u64 tx_many_rexmt; 655 u64 tx_many_rexmt;
@@ -670,14 +670,14 @@ struct nv_ethtool_stats {
670 u64 rx_unicast; 670 u64 rx_unicast;
671 u64 rx_multicast; 671 u64 rx_multicast;
672 u64 rx_broadcast; 672 u64 rx_broadcast;
673 u64 rx_packets; 673 u64 rx_packets; /* should be ifconfig->rx_packets */
674 u64 rx_errors_total; 674 u64 rx_errors_total;
675 u64 tx_errors_total; 675 u64 tx_errors_total;
676 676
677 /* version 2 stats */ 677 /* version 2 stats */
678 u64 tx_deferral; 678 u64 tx_deferral;
679 u64 tx_packets; 679 u64 tx_packets; /* should be ifconfig->tx_packets */
680 u64 rx_bytes; 680 u64 rx_bytes; /* should be ifconfig->rx_bytes + 4*rx_packets */
681 u64 tx_pause; 681 u64 tx_pause;
682 u64 rx_pause; 682 u64 rx_pause;
683 u64 rx_drop_frame; 683 u64 rx_drop_frame;
@@ -1706,10 +1706,17 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1706 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) { 1706 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
1707 nv_get_hw_stats(dev); 1707 nv_get_hw_stats(dev);
1708 1708
1709 /*
1710 * Note: because HW stats are not always available and
1711 * for consistency reasons, the following ifconfig
1712 * stats are managed by software: rx_bytes, tx_bytes,
1713 * rx_packets and tx_packets. The related hardware
1714 * stats reported by ethtool should be equivalent to
1715 * these ifconfig stats, with 4 additional bytes per
1716 * packet (Ethernet FCS CRC).
1717 */
1718
1709 /* copy to net_device stats */ 1719 /* copy to net_device stats */
1710 dev->stats.tx_packets = np->estats.tx_packets;
1711 dev->stats.rx_bytes = np->estats.rx_bytes;
1712 dev->stats.tx_bytes = np->estats.tx_bytes;
1713 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; 1720 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
1714 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; 1721 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
1715 dev->stats.rx_crc_errors = np->estats.rx_crc_errors; 1722 dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
@@ -2380,6 +2387,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
2380 if (flags & NV_TX_ERROR) { 2387 if (flags & NV_TX_ERROR) {
2381 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) 2388 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2382 nv_legacybackoff_reseed(dev); 2389 nv_legacybackoff_reseed(dev);
2390 } else {
2391 dev->stats.tx_packets++;
2392 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2383 } 2393 }
2384 dev_kfree_skb_any(np->get_tx_ctx->skb); 2394 dev_kfree_skb_any(np->get_tx_ctx->skb);
2385 np->get_tx_ctx->skb = NULL; 2395 np->get_tx_ctx->skb = NULL;
@@ -2390,6 +2400,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
2390 if (flags & NV_TX2_ERROR) { 2400 if (flags & NV_TX2_ERROR) {
2391 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2401 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2392 nv_legacybackoff_reseed(dev); 2402 nv_legacybackoff_reseed(dev);
2403 } else {
2404 dev->stats.tx_packets++;
2405 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2393 } 2406 }
2394 dev_kfree_skb_any(np->get_tx_ctx->skb); 2407 dev_kfree_skb_any(np->get_tx_ctx->skb);
2395 np->get_tx_ctx->skb = NULL; 2408 np->get_tx_ctx->skb = NULL;
@@ -2429,6 +2442,9 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2429 else 2442 else
2430 nv_legacybackoff_reseed(dev); 2443 nv_legacybackoff_reseed(dev);
2431 } 2444 }
2445 } else {
2446 dev->stats.tx_packets++;
2447 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2432 } 2448 }
2433 2449
2434 dev_kfree_skb_any(np->get_tx_ctx->skb); 2450 dev_kfree_skb_any(np->get_tx_ctx->skb);
@@ -2678,6 +2694,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2678 skb->protocol = eth_type_trans(skb, dev); 2694 skb->protocol = eth_type_trans(skb, dev);
2679 napi_gro_receive(&np->napi, skb); 2695 napi_gro_receive(&np->napi, skb);
2680 dev->stats.rx_packets++; 2696 dev->stats.rx_packets++;
2697 dev->stats.rx_bytes += len;
2681next_pkt: 2698next_pkt:
2682 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2699 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2683 np->get_rx.orig = np->first_rx.orig; 2700 np->get_rx.orig = np->first_rx.orig;
@@ -2761,6 +2778,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2761 } 2778 }
2762 napi_gro_receive(&np->napi, skb); 2779 napi_gro_receive(&np->napi, skb);
2763 dev->stats.rx_packets++; 2780 dev->stats.rx_packets++;
2781 dev->stats.rx_bytes += len;
2764 } else { 2782 } else {
2765 dev_kfree_skb(skb); 2783 dev_kfree_skb(skb);
2766 } 2784 }
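
forcedeth now maintains rx/tx packet and byte counters in software at completion time instead of copying them from the hardware MIB, which is not present on every chip variant; the ethtool counters keep reporting the HW values, which include 4 bytes of FCS per packet. A minimal kernel-context sketch of the TX-side counting; the helper name and error handling are placeholders:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void tx_complete_one(struct net_device *dev, struct sk_buff *skb,
                                bool had_error)
    {
            if (!had_error) {
                    /* Count only frames that actually completed without error;
                     * the HW MIB would additionally count the FCS bytes. */
                    dev->stats.tx_packets++;
                    dev->stats.tx_bytes += skb->len;
            }
            dev_kfree_skb_any(skb);
    }
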
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 9c075ea2682e..9cb5f912e489 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -18,8 +18,8 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/module.h> /* for __MODULE_STRING */
22#include "pch_gbe.h" 21#include "pch_gbe.h"
22#include <linux/module.h> /* for __MODULE_STRING */
23 23
24#define OPTION_UNSET -1 24#define OPTION_UNSET -1
25#define OPTION_DISABLED 0 25#define OPTION_DISABLED 0
diff --git a/drivers/net/ethernet/pasemi/Makefile b/drivers/net/ethernet/pasemi/Makefile
index 05db5434bafc..90497ffb1ac3 100644
--- a/drivers/net/ethernet/pasemi/Makefile
+++ b/drivers/net/ethernet/pasemi/Makefile
@@ -2,4 +2,5 @@
2# Makefile for the A Semi network device drivers. 2# Makefile for the A Semi network device drivers.
3# 3#
4 4
5obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o 5obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
6pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 8731f79c9efc..b8478aab050e 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -58,10 +58,8 @@
58 58
59 59
60#define TX_DESC_PER_IOCB 8 60#define TX_DESC_PER_IOCB 8
61/* The maximum number of frags we handle is based 61
62 * on PAGE_SIZE... 62#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
63 */
64#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
65#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) 63#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
66#else /* all other page sizes */ 64#else /* all other page sizes */
67#define TX_DESC_PER_OAL 0 65#define TX_DESC_PER_OAL 0
@@ -1353,7 +1351,7 @@ struct tx_ring_desc {
1353 struct ob_mac_iocb_req *queue_entry; 1351 struct ob_mac_iocb_req *queue_entry;
1354 u32 index; 1352 u32 index;
1355 struct oal oal; 1353 struct oal oal;
1356 struct map_list map[MAX_SKB_FRAGS + 1]; 1354 struct map_list map[MAX_SKB_FRAGS + 2];
1357 int map_cnt; 1355 int map_cnt;
1358 struct tx_ring_desc *next; 1356 struct tx_ring_desc *next;
1359}; 1357};
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 1fc01ca72b46..4bf68cfef390 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -940,7 +940,7 @@ static void r6040_multicast_list(struct net_device *dev)
940 iowrite16(lp->mcr0, ioaddr + MCR0); 940 iowrite16(lp->mcr0, ioaddr + MCR0);
941 941
942 /* Fill the MAC hash tables with their values */ 942 /* Fill the MAC hash tables with their values */
943 if (lp->mcr0 && MCR0_HASH_EN) { 943 if (lp->mcr0 & MCR0_HASH_EN) {
944 iowrite16(hash_table[0], ioaddr + MAR0); 944 iowrite16(hash_table[0], ioaddr + MAR0);
945 iowrite16(hash_table[1], ioaddr + MAR1); 945 iowrite16(hash_table[1], ioaddr + MAR1);
946 iowrite16(hash_table[2], ioaddr + MAR2); 946 iowrite16(hash_table[2], ioaddr + MAR2);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 92b45f08858f..67bf07819992 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1183,11 +1183,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
1183 return value; 1183 return value;
1184} 1184}
1185 1185
1186static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) 1186static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1187{ 1187{
1188 RTL_W16(IntrMask, 0x0000); 1188 void __iomem *ioaddr = tp->mmio_addr;
1189 1189
1190 RTL_W16(IntrStatus, 0xffff); 1190 RTL_W16(IntrMask, 0x0000);
1191 RTL_W16(IntrStatus, tp->intr_event);
1192 RTL_R8(ChipCmd);
1191} 1193}
1192 1194
1193static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) 1195static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
@@ -1292,7 +1294,7 @@ static void __rtl8169_check_link_status(struct net_device *dev,
1292 netif_carrier_off(dev); 1294 netif_carrier_off(dev);
1293 netif_info(tp, ifdown, dev, "link down\n"); 1295 netif_info(tp, ifdown, dev, "link down\n");
1294 if (pm) 1296 if (pm)
1295 pm_schedule_suspend(&tp->pci_dev->dev, 100); 1297 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
1296 } 1298 }
1297 spin_unlock_irqrestore(&tp->lock, flags); 1299 spin_unlock_irqrestore(&tp->lock, flags);
1298} 1300}
@@ -3933,8 +3935,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
3933 break; 3935 break;
3934 udelay(100); 3936 udelay(100);
3935 } 3937 }
3936
3937 rtl8169_init_ring_indexes(tp);
3938} 3938}
3939 3939
3940static int __devinit 3940static int __devinit
@@ -4339,7 +4339,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
4339 void __iomem *ioaddr = tp->mmio_addr; 4339 void __iomem *ioaddr = tp->mmio_addr;
4340 4340
4341 /* Disable interrupts */ 4341 /* Disable interrupts */
4342 rtl8169_irq_mask_and_ack(ioaddr); 4342 rtl8169_irq_mask_and_ack(tp);
4343 4343
4344 rtl_rx_close(tp); 4344 rtl_rx_close(tp);
4345 4345
@@ -4885,8 +4885,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
4885 RTL_W16(IntrMitigate, 0x5151); 4885 RTL_W16(IntrMitigate, 0x5151);
4886 4886
4887 /* Work around for RxFIFO overflow. */ 4887 /* Work around for RxFIFO overflow. */
4888 if (tp->mac_version == RTL_GIGA_MAC_VER_11 || 4888 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
4889 tp->mac_version == RTL_GIGA_MAC_VER_22) {
4890 tp->intr_event |= RxFIFOOver | PCSTimeout; 4889 tp->intr_event |= RxFIFOOver | PCSTimeout;
4891 tp->intr_event &= ~RxOverflow; 4890 tp->intr_event &= ~RxOverflow;
4892 } 4891 }
@@ -5076,6 +5075,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
5076 void __iomem *ioaddr = tp->mmio_addr; 5075 void __iomem *ioaddr = tp->mmio_addr;
5077 struct pci_dev *pdev = tp->pci_dev; 5076 struct pci_dev *pdev = tp->pci_dev;
5078 5077
5078 if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
5079 tp->intr_event &= ~RxFIFOOver;
5080 tp->napi_event &= ~RxFIFOOver;
5081 }
5082
5079 if (tp->mac_version == RTL_GIGA_MAC_VER_13 || 5083 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5080 tp->mac_version == RTL_GIGA_MAC_VER_16) { 5084 tp->mac_version == RTL_GIGA_MAC_VER_16) {
5081 int cap = pci_pcie_cap(pdev); 5085 int cap = pci_pcie_cap(pdev);
@@ -5342,7 +5346,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
5342 /* Wait for any pending NAPI task to complete */ 5346 /* Wait for any pending NAPI task to complete */
5343 napi_disable(&tp->napi); 5347 napi_disable(&tp->napi);
5344 5348
5345 rtl8169_irq_mask_and_ack(ioaddr); 5349 rtl8169_irq_mask_and_ack(tp);
5346 5350
5347 tp->intr_mask = 0xffff; 5351 tp->intr_mask = 0xffff;
5348 RTL_W16(IntrMask, tp->intr_event); 5352 RTL_W16(IntrMask, tp->intr_event);
@@ -5389,14 +5393,16 @@ static void rtl8169_reset_task(struct work_struct *work)
5389 if (!netif_running(dev)) 5393 if (!netif_running(dev))
5390 goto out_unlock; 5394 goto out_unlock;
5391 5395
5396 rtl8169_hw_reset(tp);
5397
5392 rtl8169_wait_for_quiescence(dev); 5398 rtl8169_wait_for_quiescence(dev);
5393 5399
5394 for (i = 0; i < NUM_RX_DESC; i++) 5400 for (i = 0; i < NUM_RX_DESC; i++)
5395 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); 5401 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
5396 5402
5397 rtl8169_tx_clear(tp); 5403 rtl8169_tx_clear(tp);
5404 rtl8169_init_ring_indexes(tp);
5398 5405
5399 rtl8169_hw_reset(tp);
5400 rtl_hw_start(dev); 5406 rtl_hw_start(dev);
5401 netif_wake_queue(dev); 5407 netif_wake_queue(dev);
5402 rtl8169_check_link_status(dev, tp, tp->mmio_addr); 5408 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5407,11 +5413,6 @@ out_unlock:
5407 5413
5408static void rtl8169_tx_timeout(struct net_device *dev) 5414static void rtl8169_tx_timeout(struct net_device *dev)
5409{ 5415{
5410 struct rtl8169_private *tp = netdev_priv(dev);
5411
5412 rtl8169_hw_reset(tp);
5413
5414 /* Let's wait a bit while any (async) irq lands on */
5415 rtl8169_schedule_work(dev, rtl8169_reset_task); 5416 rtl8169_schedule_work(dev, rtl8169_reset_task);
5416} 5417}
5417 5418
@@ -5804,6 +5805,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
5804 */ 5805 */
5805 status = RTL_R16(IntrStatus); 5806 status = RTL_R16(IntrStatus);
5806 while (status && status != 0xffff) { 5807 while (status && status != 0xffff) {
5808 status &= tp->intr_event;
5809 if (!status)
5810 break;
5811
5807 handled = 1; 5812 handled = 1;
5808 5813
5809 /* Handle all of the error cases first. These will reset 5814 /* Handle all of the error cases first. These will reset
@@ -5818,27 +5823,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
5818 switch (tp->mac_version) { 5823 switch (tp->mac_version) {
5819 /* Work around for rx fifo overflow */ 5824 /* Work around for rx fifo overflow */
5820 case RTL_GIGA_MAC_VER_11: 5825 case RTL_GIGA_MAC_VER_11:
5821 case RTL_GIGA_MAC_VER_22:
5822 case RTL_GIGA_MAC_VER_26:
5823 netif_stop_queue(dev); 5826 netif_stop_queue(dev);
5824 rtl8169_tx_timeout(dev); 5827 rtl8169_tx_timeout(dev);
5825 goto done; 5828 goto done;
5826 /* Testers needed. */
5827 case RTL_GIGA_MAC_VER_17:
5828 case RTL_GIGA_MAC_VER_19:
5829 case RTL_GIGA_MAC_VER_20:
5830 case RTL_GIGA_MAC_VER_21:
5831 case RTL_GIGA_MAC_VER_23:
5832 case RTL_GIGA_MAC_VER_24:
5833 case RTL_GIGA_MAC_VER_27:
5834 case RTL_GIGA_MAC_VER_28:
5835 case RTL_GIGA_MAC_VER_31:
5836 /* Experimental science. Pktgen proof. */
5837 case RTL_GIGA_MAC_VER_12:
5838 case RTL_GIGA_MAC_VER_25:
5839 if (status == RxFIFOOver)
5840 goto done;
5841 break;
5842 default: 5829 default:
5843 break; 5830 break;
5844 } 5831 }
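
rtl8169_irq_mask_and_ack() now ends with a dummy read of ChipCmd: PCI/PCIe writes can sit in posting buffers, and reading any register from the same device forces the mask/ack writes to reach the chip before the caller proceeds. A generic hedged sketch of the flush-by-read idiom, assuming kernel context; the register offsets below are invented, not the r8169 map:

    #include <linux/types.h>
    #include <linux/io.h>

    #define REG_INTR_MASK   0x3c    /* illustrative offsets only */
    #define REG_INTR_STATUS 0x3e
    #define REG_FLUSH       0x37    /* any readable register on the same device */

    static void irq_mask_and_ack(void __iomem *ioaddr, u16 pending)
    {
            writew(0x0000, ioaddr + REG_INTR_MASK);     /* mask everything */
            writew(pending, ioaddr + REG_INTR_STATUS);  /* ack what was seen */
            readb(ioaddr + REG_FLUSH);                  /* flush posted writes */
    }
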
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index d2be42aafbef..8843071fe987 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1937,6 +1937,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
1937{ 1937{
1938 struct smsc911x_data *pdata = netdev_priv(dev); 1938 struct smsc911x_data *pdata = netdev_priv(dev);
1939 unsigned int byte_test; 1939 unsigned int byte_test;
1940 unsigned int to = 100;
1940 1941
1941 SMSC_TRACE(pdata, probe, "Driver Parameters:"); 1942 SMSC_TRACE(pdata, probe, "Driver Parameters:");
1942 SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX", 1943 SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX",
@@ -1952,6 +1953,17 @@ static int __devinit smsc911x_init(struct net_device *dev)
1952 return -ENODEV; 1953 return -ENODEV;
1953 } 1954 }
1954 1955
1956 /*
1957 * poll the READY bit in PMT_CTRL. Any other access to the device is
1958 * forbidden while this bit isn't set. Try for 100ms
1959 */
1960 while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
1961 udelay(1000);
1962 if (to == 0) {
1963 pr_err("Device not READY in 100ms aborting\n");
1964 return -ENODEV;
1965 }
1966
1955 /* Check byte ordering */ 1967 /* Check byte ordering */
1956 byte_test = smsc911x_reg_read(pdata, BYTE_TEST); 1968 byte_test = smsc911x_reg_read(pdata, BYTE_TEST);
1957 SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test); 1969 SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test);
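
smsc911x_init() now refuses to touch the chip until the READY bit in PMT_CTRL comes up, polling for at most about 100 ms. A hedged kernel-context sketch of the bounded-poll idiom it uses; the register offset and bit value here are placeholders:

    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    #define PMT_CTRL        0x84            /* illustrative offset */
    #define PMT_CTRL_READY  0x00000001      /* illustrative bit */

    static int wait_device_ready(void __iomem *ioaddr)
    {
            unsigned int to = 100;

            /* Busy-wait roughly 1 ms per attempt, up to ~100 ms, as in probe. */
            while (!(readl(ioaddr + PMT_CTRL) & PMT_CTRL_READY) && --to)
                    udelay(1000);

            return to ? 0 : -ENODEV;        /* never became ready in time */
    }
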
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index da66ac511c4c..4d5402a1d262 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -39,10 +39,11 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
39 /* DMA SW reset */ 39 /* DMA SW reset */
40 value |= DMA_BUS_MODE_SFT_RESET; 40 value |= DMA_BUS_MODE_SFT_RESET;
41 writel(value, ioaddr + DMA_BUS_MODE); 41 writel(value, ioaddr + DMA_BUS_MODE);
42 limit = 15000; 42 limit = 10;
43 while (limit--) { 43 while (limit--) {
44 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) 44 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
45 break; 45 break;
46 mdelay(10);
46 } 47 }
47 if (limit < 0) 48 if (limit < 0)
48 return -EBUSY; 49 return -EBUSY;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 627f656b0f3c..bc17fd08b55d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -41,10 +41,11 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
41 /* DMA SW reset */ 41 /* DMA SW reset */
42 value |= DMA_BUS_MODE_SFT_RESET; 42 value |= DMA_BUS_MODE_SFT_RESET;
43 writel(value, ioaddr + DMA_BUS_MODE); 43 writel(value, ioaddr + DMA_BUS_MODE);
44 limit = 15000; 44 limit = 10;
45 while (limit--) { 45 while (limit--) {
46 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) 46 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
47 break; 47 break;
48 mdelay(10);
48 } 49 }
49 if (limit < 0) 50 if (limit < 0)
50 return -EBUSY; 51 return -EBUSY;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 9bafa6cf9e8b..a140a8fbf051 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -72,7 +72,6 @@ struct stmmac_priv {
72 spinlock_t lock; 72 spinlock_t lock;
73 spinlock_t tx_lock; 73 spinlock_t tx_lock;
74 int wolopts; 74 int wolopts;
75 int wolenabled;
76 int wol_irq; 75 int wol_irq;
77#ifdef CONFIG_STMMAC_TIMER 76#ifdef CONFIG_STMMAC_TIMER
78 struct stmmac_timer *tm; 77 struct stmmac_timer *tm;
@@ -80,6 +79,7 @@ struct stmmac_priv {
80 struct plat_stmmacenet_data *plat; 79 struct plat_stmmacenet_data *plat;
81 struct stmmac_counters mmc; 80 struct stmmac_counters mmc;
82 struct dma_features dma_cap; 81 struct dma_features dma_cap;
82 int hw_cap_support;
83}; 83};
84 84
85extern int stmmac_mdio_unregister(struct net_device *ndev); 85extern int stmmac_mdio_unregister(struct net_device *ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e8eff09bbbd7..0395f9eba801 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -430,6 +430,12 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
430 struct stmmac_priv *priv = netdev_priv(dev); 430 struct stmmac_priv *priv = netdev_priv(dev);
431 u32 support = WAKE_MAGIC | WAKE_UCAST; 431 u32 support = WAKE_MAGIC | WAKE_UCAST;
432 432
433 /* By default almost all GMAC devices support the WoL via
434 * magic frame but we can disable it if the HW capability
435 * register shows no support for pmt_magic_frame. */
436 if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
437 wol->wolopts &= ~WAKE_MAGIC;
438
433 if (!device_can_wakeup(priv->device)) 439 if (!device_can_wakeup(priv->device))
434 return -EINVAL; 440 return -EINVAL;
435 441
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 20546bbbb8db..72cd190b9c1a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -321,12 +321,10 @@ static int stmmac_init_phy(struct net_device *dev)
321 } 321 }
322 322
323 /* Stop Advertising 1000BASE Capability if interface is not GMII */ 323 /* Stop Advertising 1000BASE Capability if interface is not GMII */
324 if ((interface) && ((interface == PHY_INTERFACE_MODE_MII) || 324 if ((interface == PHY_INTERFACE_MODE_MII) ||
325 (interface == PHY_INTERFACE_MODE_RMII))) { 325 (interface == PHY_INTERFACE_MODE_RMII))
326 phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | 326 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
327 SUPPORTED_Asym_Pause); 327 SUPPORTED_1000baseT_Full);
328 phydev->advertising = phydev->supported;
329 }
330 328
331 /* 329 /*
332 * Broken HW is sometimes missing the pull-up resistor on the 330 * Broken HW is sometimes missing the pull-up resistor on the
@@ -783,10 +781,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
783 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 781 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
784 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 782 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
785 783
786 /* Do not manage MMC IRQ (FIXME) */ 784 /* Mask MMC irq, counters are managed in SW and registers
785 * are cleared on each READ eventually. */
787 dwmac_mmc_intr_all_mask(priv->ioaddr); 786 dwmac_mmc_intr_all_mask(priv->ioaddr);
788 dwmac_mmc_ctrl(priv->ioaddr, mode); 787
789 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 788 if (priv->dma_cap.rmon) {
789 dwmac_mmc_ctrl(priv->ioaddr, mode);
790 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
791 } else
792 pr_info(" No MAC Management Counters available");
790} 793}
791 794
792static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) 795static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
@@ -807,8 +810,29 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
807 return 0; 810 return 0;
808} 811}
809 812
810/* New GMAC chips support a new register to indicate the 813/**
811 * presence of the optional feature/functions. 814 * stmmac_selec_desc_mode
815 * @dev : device pointer
816 * Description: select the Enhanced/Alternate or Normal descriptors */
817static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
818{
819 if (priv->plat->enh_desc) {
820 pr_info(" Enhanced/Alternate descriptors\n");
821 priv->hw->desc = &enh_desc_ops;
822 } else {
823 pr_info(" Normal descriptors\n");
824 priv->hw->desc = &ndesc_ops;
825 }
826}
827
828/**
829 * stmmac_get_hw_features
830 * @priv : private device pointer
831 * Description:
832 * new GMAC chip generations have a new register to indicate the
833 * presence of the optional feature/functions.
834 * This can be also used to override the value passed through the
835 * platform and necessary for old MAC10/100 and GMAC chips.
812 */ 836 */
813static int stmmac_get_hw_features(struct stmmac_priv *priv) 837static int stmmac_get_hw_features(struct stmmac_priv *priv)
814{ 838{
@@ -829,7 +853,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
829 (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9; 853 (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
830 priv->dma_cap.pmt_magic_frame = 854 priv->dma_cap.pmt_magic_frame =
831 (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10; 855 (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
832 /*MMC*/ 856 /* MMC */
833 priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11; 857 priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
834 /* IEEE 1588-2002*/ 858 /* IEEE 1588-2002*/
835 priv->dma_cap.time_stamp = 859 priv->dma_cap.time_stamp =
@@ -857,8 +881,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
857 priv->dma_cap.enh_desc = 881 priv->dma_cap.enh_desc =
858 (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; 882 (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
859 883
860 } else 884 }
861 pr_debug("\tNo HW DMA feature register supported");
862 885
863 return hw_cap; 886 return hw_cap;
864} 887}
@@ -913,6 +936,44 @@ static int stmmac_open(struct net_device *dev)
913 goto open_error; 936 goto open_error;
914 } 937 }
915 938
939 stmmac_get_synopsys_id(priv);
940
941 priv->hw_cap_support = stmmac_get_hw_features(priv);
942
943 if (priv->hw_cap_support) {
944 pr_info(" Support DMA HW capability register");
945
946 /* We can override some gmac/dma configuration fields: e.g.
947 * enh_desc, tx_coe (e.g. that are passed through the
948 * platform) with the values from the HW capability
949 * register (if supported).
950 */
951 priv->plat->enh_desc = priv->dma_cap.enh_desc;
952 priv->plat->tx_coe = priv->dma_cap.tx_coe;
953 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
954
955 /* By default disable wol on magic frame if not supported */
956 if (!priv->dma_cap.pmt_magic_frame)
957 priv->wolopts &= ~WAKE_MAGIC;
958
959 } else
960 pr_info(" No HW DMA feature register supported");
961
 962 /* Select the enhanced/normal descriptor structures */
963 stmmac_selec_desc_mode(priv);
964
965 /* PMT module is not integrated in all the MAC devices. */
966 if (priv->plat->pmt) {
967 pr_info(" Remote wake-up capable\n");
968 device_set_wakeup_capable(priv->device, 1);
969 }
970
971 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
972 if (priv->rx_coe)
973 pr_info(" Checksum Offload Engine supported\n");
974 if (priv->plat->tx_coe)
975 pr_info(" Checksum insertion supported\n");
976
916 /* Create and initialize the TX/RX descriptors chains. */ 977 /* Create and initialize the TX/RX descriptors chains. */
917 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); 978 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
918 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); 979 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
@@ -935,15 +996,6 @@ static int stmmac_open(struct net_device *dev)
935 /* Initialize the MAC Core */ 996 /* Initialize the MAC Core */
936 priv->hw->mac->core_init(priv->ioaddr); 997 priv->hw->mac->core_init(priv->ioaddr);
937 998
938 stmmac_get_synopsys_id(priv);
939
940 stmmac_get_hw_features(priv);
941
942 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
943 if (priv->rx_coe)
944 pr_info("stmmac: Rx Checksum Offload Engine supported\n");
945 if (priv->plat->tx_coe)
946 pr_info("\tTX Checksum insertion supported\n");
947 netdev_update_features(dev); 999 netdev_update_features(dev);
948 1000
949 /* Request the IRQ lines */ 1001 /* Request the IRQ lines */
@@ -965,8 +1017,7 @@ static int stmmac_open(struct net_device *dev)
965 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); 1017 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
966 priv->xstats.threshold = tc; 1018 priv->xstats.threshold = tc;
967 1019
968 if (priv->dma_cap.rmon) 1020 stmmac_mmc_setup(priv);
969 stmmac_mmc_setup(priv);
970 1021
971 /* Start the ball rolling... */ 1022 /* Start the ball rolling... */
972 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); 1023 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
@@ -1489,9 +1540,7 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1489 if (!priv->phydev) 1540 if (!priv->phydev)
1490 return -EINVAL; 1541 return -EINVAL;
1491 1542
1492 spin_lock(&priv->lock);
1493 ret = phy_mii_ioctl(priv->phydev, rq, cmd); 1543 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
1494 spin_unlock(&priv->lock);
1495 1544
1496 return ret; 1545 return ret;
1497} 1546}
@@ -1558,7 +1607,7 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
1558 struct net_device *dev = seq->private; 1607 struct net_device *dev = seq->private;
1559 struct stmmac_priv *priv = netdev_priv(dev); 1608 struct stmmac_priv *priv = netdev_priv(dev);
1560 1609
1561 if (!stmmac_get_hw_features(priv)) { 1610 if (!priv->hw_cap_support) {
1562 seq_printf(seq, "DMA HW features not supported\n"); 1611 seq_printf(seq, "DMA HW features not supported\n");
1563 return 0; 1612 return 0;
1564 } 1613 }
@@ -1766,12 +1815,6 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1766 if (!device) 1815 if (!device)
1767 return -ENOMEM; 1816 return -ENOMEM;
1768 1817
1769 if (priv->plat->enh_desc) {
1770 device->desc = &enh_desc_ops;
1771 pr_info("\tEnhanced descriptor structure\n");
1772 } else
1773 device->desc = &ndesc_ops;
1774
1775 priv->hw = device; 1818 priv->hw = device;
1776 priv->hw->ring = &ring_mode_ops; 1819 priv->hw->ring = &ring_mode_ops;
1777 1820
@@ -1845,11 +1888,6 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1845 1888
1846 priv->ioaddr = addr; 1889 priv->ioaddr = addr;
1847 1890
1848 /* PMT module is not integrated in all the MAC devices. */
1849 if (plat_dat->pmt) {
1850 pr_info("\tPMT module supported\n");
1851 device_set_wakeup_capable(&pdev->dev, 1);
1852 }
1853 /* 1891 /*
1854 * On some platforms e.g. SPEAr the wake up irq differs from the mac irq 1892 * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
1855 * The external wake up irq can be passed through the platform code 1893 * The external wake up irq can be passed through the platform code
@@ -1862,7 +1900,6 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1862 if (priv->wol_irq == -ENXIO) 1900 if (priv->wol_irq == -ENXIO)
1863 priv->wol_irq = ndev->irq; 1901 priv->wol_irq = ndev->irq;
1864 1902
1865
1866 platform_set_drvdata(pdev, ndev); 1903 platform_set_drvdata(pdev, ndev);
1867 1904
1868 /* Set the I/O base addr */ 1905 /* Set the I/O base addr */
@@ -1875,7 +1912,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1875 goto out_free_ndev; 1912 goto out_free_ndev;
1876 } 1913 }
1877 1914
1878 /* MAC HW revice detection */ 1915 /* MAC HW device detection */
1879 ret = stmmac_mac_device_setup(ndev); 1916 ret = stmmac_mac_device_setup(ndev);
1880 if (ret < 0) 1917 if (ret < 0)
1881 goto out_plat_exit; 1918 goto out_plat_exit;
@@ -1978,12 +2015,13 @@ static int stmmac_suspend(struct device *dev)
1978 if (!ndev || !netif_running(ndev)) 2015 if (!ndev || !netif_running(ndev))
1979 return 0; 2016 return 0;
1980 2017
2018 if (priv->phydev)
2019 phy_stop(priv->phydev);
2020
1981 spin_lock(&priv->lock); 2021 spin_lock(&priv->lock);
1982 2022
1983 netif_device_detach(ndev); 2023 netif_device_detach(ndev);
1984 netif_stop_queue(ndev); 2024 netif_stop_queue(ndev);
1985 if (priv->phydev)
1986 phy_stop(priv->phydev);
1987 2025
1988#ifdef CONFIG_STMMAC_TIMER 2026#ifdef CONFIG_STMMAC_TIMER
1989 priv->tm->timer_stop(); 2027 priv->tm->timer_stop();
@@ -2041,12 +2079,13 @@ static int stmmac_resume(struct device *dev)
2041#endif 2079#endif
2042 napi_enable(&priv->napi); 2080 napi_enable(&priv->napi);
2043 2081
2044 if (priv->phydev)
2045 phy_start(priv->phydev);
2046
2047 netif_start_queue(ndev); 2082 netif_start_queue(ndev);
2048 2083
2049 spin_unlock(&priv->lock); 2084 spin_unlock(&priv->lock);
2085
2086 if (priv->phydev)
2087 phy_start(priv->phydev);
2088
2050 return 0; 2089 return 0;
2051} 2090}
2052 2091
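Note: the stmmac hunks above read the DMA HW feature register once at open time, cache the result in priv->hw_cap_support, and let it override the enh_desc/tx_coe/pmt values passed in through platform data. Below is a minimal user-space sketch of that "capability register overrides platform defaults" pattern; the bit names, struct fields and stubbed register read are invented for illustration and are not the driver's real definitions.

#include <stdbool.h>
#include <stdio.h>

#define CAP_ENH_DESC (1u << 24)
#define CAP_TX_COE   (1u << 14)
#define CAP_PMT_WAKE (1u << 9)

struct plat_cfg { bool enh_desc, tx_coe, pmt; };

/* Stand-in for reading the DMA HW feature register; a real driver would
 * read it from the MAC register space. 0 means "no such register". */
static unsigned int read_hw_cap(void)
{
	return CAP_ENH_DESC | CAP_TX_COE;
}

static void apply_hw_caps(struct plat_cfg *plat)
{
	unsigned int cap = read_hw_cap();

	if (!cap) {
		/* old MAC10/100 or GMAC: keep whatever platform data said */
		printf("No HW DMA feature register, using platform data\n");
		return;
	}
	plat->enh_desc = !!(cap & CAP_ENH_DESC);
	plat->tx_coe   = !!(cap & CAP_TX_COE);
	plat->pmt      = !!(cap & CAP_PMT_WAKE);
}

int main(void)
{
	struct plat_cfg plat = { .enh_desc = false, .tx_coe = false, .pmt = true };

	apply_hw_caps(&plat);
	printf("enh_desc=%d tx_coe=%d pmt=%d\n",
	       plat.enh_desc, plat.tx_coe, plat.pmt);
	return 0;
}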
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index c517dac02ae1..cf14ab9db576 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2637,7 +2637,7 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
2637 sbus_dp = op->dev.parent->of_node; 2637 sbus_dp = op->dev.parent->of_node;
2638 2638
2639 /* We can match PCI devices too, do not accept those here. */ 2639 /* We can match PCI devices too, do not accept those here. */
2640 if (strcmp(sbus_dp->name, "sbus")) 2640 if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi"))
2641 return err; 2641 return err;
2642 2642
2643 if (is_qfe) { 2643 if (is_qfe) {
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 10826d8a2a2d..1187a1169eb2 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
926 goto done; 926 goto done;
927 927
928 /* Re-enable the ingress interrupt. */ 928 /* Re-enable the ingress interrupt. */
929 enable_percpu_irq(priv->intr_id); 929 enable_percpu_irq(priv->intr_id, 0);
930 930
931 /* HACK: Avoid the "rotting packet" problem (see above). */ 931 /* HACK: Avoid the "rotting packet" problem (see above). */
932 if (qup->__packet_receive_read != 932 if (qup->__packet_receive_read !=
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
1296 info->napi_enabled = true; 1296 info->napi_enabled = true;
1297 1297
1298 /* Enable the ingress interrupt. */ 1298 /* Enable the ingress interrupt. */
1299 enable_percpu_irq(priv->intr_id); 1299 enable_percpu_irq(priv->intr_id, 0);
1300} 1300}
1301 1301
1302 1302
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1697 for (i = 0; i < sh->nr_frags; i++) { 1697 for (i = 0; i < sh->nr_frags; i++) {
1698 1698
1699 skb_frag_t *f = &sh->frags[i]; 1699 skb_frag_t *f = &sh->frags[i];
1700 unsigned long pfn = page_to_pfn(f->page); 1700 unsigned long pfn = page_to_pfn(skb_frag_page(f));
1701 1701
1702 /* FIXME: Compute "hash_for_home" properly. */ 1702 /* FIXME: Compute "hash_for_home" properly. */
1703 /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ 1703 /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1706 /* FIXME: Hmmm. */ 1706 /* FIXME: Hmmm. */
1707 if (!hash_default) { 1707 if (!hash_default) {
1708 void *va = pfn_to_kaddr(pfn) + f->page_offset; 1708 void *va = pfn_to_kaddr(pfn) + f->page_offset;
1709 BUG_ON(PageHighMem(f->page)); 1709 BUG_ON(PageHighMem(skb_frag_page(f)));
1710 finv_buffer_remote(va, f->size, 0); 1710 finv_buffer_remote(va, f->size, 0);
1711 } 1711 }
1712 1712
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index caf3659e173c..2681b53820ee 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -114,6 +114,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
114 return; 114 return;
115 temac_iow(lp, XTE_LSW0_OFFSET, value); 115 temac_iow(lp, XTE_LSW0_OFFSET, value);
116 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg); 116 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
117 temac_indirect_busywait(lp);
117} 118}
118 119
119/** 120/**
@@ -203,6 +204,9 @@ static void temac_dma_bd_release(struct net_device *ndev)
203 struct temac_local *lp = netdev_priv(ndev); 204 struct temac_local *lp = netdev_priv(ndev);
204 int i; 205 int i;
205 206
207 /* Reset Local Link (DMA) */
208 lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
209
206 for (i = 0; i < RX_BD_NUM; i++) { 210 for (i = 0; i < RX_BD_NUM; i++) {
207 if (!lp->rx_skb[i]) 211 if (!lp->rx_skb[i])
208 break; 212 break;
@@ -860,6 +864,8 @@ static int temac_open(struct net_device *ndev)
860 phy_start(lp->phy_dev); 864 phy_start(lp->phy_dev);
861 } 865 }
862 866
867 temac_device_reset(ndev);
868
863 rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev); 869 rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
864 if (rc) 870 if (rc)
865 goto err_tx_irq; 871 goto err_tx_irq;
@@ -867,7 +873,6 @@ static int temac_open(struct net_device *ndev)
867 if (rc) 873 if (rc)
868 goto err_rx_irq; 874 goto err_rx_irq;
869 875
870 temac_device_reset(ndev);
871 return 0; 876 return 0;
872 877
873 err_rx_irq: 878 err_rx_irq:
diff --git a/drivers/net/hippi/Kconfig b/drivers/net/hippi/Kconfig
index 7393eb732ee6..95eb34fdbba7 100644
--- a/drivers/net/hippi/Kconfig
+++ b/drivers/net/hippi/Kconfig
@@ -36,4 +36,4 @@ config ROADRUNNER_LARGE_RINGS
36 kernel code or by user space programs. Say Y here only if you have 36 kernel code or by user space programs. Say Y here only if you have
37 the memory. 37 the memory.
38 38
39endif /* HIPPI */ 39endif # HIPPI
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index bb88e12101c7..a70244306c94 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5menuconfig PHYLIB 5menuconfig PHYLIB
6 bool "PHY Device support and infrastructure" 6 tristate "PHY Device support and infrastructure"
7 depends on !S390 7 depends on !S390
8 depends on NETDEVICES 8 depends on NETDEVICES
9 help 9 help
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 89f829f5f725..f8a6853b692e 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
423 lock_sock(sk); 423 lock_sock(sk);
424 424
425 opt->src_addr = sp->sa_addr.pptp; 425 opt->src_addr = sp->sa_addr.pptp;
426 if (add_chan(po)) { 426 if (add_chan(po))
427 release_sock(sk);
428 error = -EBUSY; 427 error = -EBUSY;
429 }
430 428
431 release_sock(sk); 429 release_sock(sk);
432 return error; 430 return error;
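Note: the pptp_bind() fix above removes the early release_sock() so the lock is dropped exactly once on both the success and the -EBUSY path. A small stand-alone sketch of the same single-unlock error handling, using a plain pthread mutex instead of a socket lock (the names and the literal -16/-EBUSY value are only illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
static int channel_taken;

static int bind_channel(void)
{
	int error = 0;

	pthread_mutex_lock(&chan_lock);
	if (channel_taken)
		error = -16;		/* -EBUSY; note: no unlock here */
	else
		channel_taken = 1;
	pthread_mutex_unlock(&chan_lock);	/* single unlock on every path */
	return error;
}

int main(void)
{
	printf("first bind:  %d\n", bind_channel());
	printf("second bind: %d\n", bind_channel());
	return 0;
}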
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index e81e22e3d1d2..e6fed4d4cb77 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -36,7 +36,7 @@
36#include <linux/usb/usbnet.h> 36#include <linux/usb/usbnet.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38 38
39#define DRIVER_VERSION "26-Sep-2011" 39#define DRIVER_VERSION "08-Nov-2011"
40#define DRIVER_NAME "asix" 40#define DRIVER_NAME "asix"
41 41
42/* ASIX AX8817X based USB 2.0 Ethernet Devices */ 42/* ASIX AX8817X based USB 2.0 Ethernet Devices */
@@ -163,7 +163,7 @@
163#define MARVELL_CTRL_TXDELAY 0x0002 163#define MARVELL_CTRL_TXDELAY 0x0002
164#define MARVELL_CTRL_RXDELAY 0x0080 164#define MARVELL_CTRL_RXDELAY 0x0080
165 165
166#define PHY_MODE_RTL8211CL 0x0004 166#define PHY_MODE_RTL8211CL 0x000C
167 167
168/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */ 168/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
169struct asix_data { 169struct asix_data {
@@ -652,9 +652,17 @@ static u32 asix_get_phyid(struct usbnet *dev)
652{ 652{
653 int phy_reg; 653 int phy_reg;
654 u32 phy_id; 654 u32 phy_id;
655 int i;
655 656
656 phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1); 657 /* Poll for the rare case the FW or phy isn't ready yet. */
657 if (phy_reg < 0) 658 for (i = 0; i < 100; i++) {
659 phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
660 if (phy_reg != 0 && phy_reg != 0xFFFF)
661 break;
662 mdelay(1);
663 }
664
665 if (phy_reg <= 0 || phy_reg == 0xFFFF)
658 return 0; 666 return 0;
659 667
660 phy_id = (phy_reg & 0xffff) << 16; 668 phy_id = (phy_reg & 0xffff) << 16;
@@ -1075,7 +1083,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
1075 1083
1076static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) 1084static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
1077{ 1085{
1078 int ret; 1086 int ret, embd_phy;
1079 struct asix_data *data = (struct asix_data *)&dev->data; 1087 struct asix_data *data = (struct asix_data *)&dev->data;
1080 u8 buf[ETH_ALEN]; 1088 u8 buf[ETH_ALEN];
1081 u32 phyid; 1089 u32 phyid;
@@ -1100,16 +1108,36 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
1100 dev->mii.reg_num_mask = 0x1f; 1108 dev->mii.reg_num_mask = 0x1f;
1101 dev->mii.phy_id = asix_get_phy_addr(dev); 1109 dev->mii.phy_id = asix_get_phy_addr(dev);
1102 1110
1103 phyid = asix_get_phyid(dev);
1104 dbg("PHYID=0x%08x", phyid);
1105
1106 dev->net->netdev_ops = &ax88772_netdev_ops; 1111 dev->net->netdev_ops = &ax88772_netdev_ops;
1107 dev->net->ethtool_ops = &ax88772_ethtool_ops; 1112 dev->net->ethtool_ops = &ax88772_ethtool_ops;
1108 1113
1109 ret = ax88772_reset(dev); 1114 embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
1115
1116 /* Reset the PHY to normal operation mode */
1117 ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
1118 if (ret < 0) {
1119 dbg("Select PHY #1 failed: %d", ret);
1120 return ret;
1121 }
1122
1123 ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
1124 if (ret < 0)
1125 return ret;
1126
1127 msleep(150);
1128
1129 ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
1110 if (ret < 0) 1130 if (ret < 0)
1111 return ret; 1131 return ret;
1112 1132
1133 msleep(150);
1134
1135 ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
1136
1137 /* Read PHYID register *AFTER* the PHY was reset properly */
1138 phyid = asix_get_phyid(dev);
1139 dbg("PHYID=0x%08x", phyid);
1140
1113 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ 1141 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
1114 if (dev->driver_info->flags & FLAG_FRAMING_AX) { 1142 if (dev->driver_info->flags & FLAG_FRAMING_AX) {
1115 /* hard_mtu is still the default - the device does not support 1143 /* hard_mtu is still the default - the device does not support
@@ -1220,6 +1248,7 @@ static int ax88178_reset(struct usbnet *dev)
1220 __le16 eeprom; 1248 __le16 eeprom;
1221 u8 status; 1249 u8 status;
1222 int gpio0 = 0; 1250 int gpio0 = 0;
1251 u32 phyid;
1223 1252
1224 asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status); 1253 asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
1225 dbg("GPIO Status: 0x%04x", status); 1254 dbg("GPIO Status: 0x%04x", status);
@@ -1235,12 +1264,13 @@ static int ax88178_reset(struct usbnet *dev)
1235 data->ledmode = 0; 1264 data->ledmode = 0;
1236 gpio0 = 1; 1265 gpio0 = 1;
1237 } else { 1266 } else {
1238 data->phymode = le16_to_cpu(eeprom) & 7; 1267 data->phymode = le16_to_cpu(eeprom) & 0x7F;
1239 data->ledmode = le16_to_cpu(eeprom) >> 8; 1268 data->ledmode = le16_to_cpu(eeprom) >> 8;
1240 gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1; 1269 gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
1241 } 1270 }
1242 dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode); 1271 dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode);
1243 1272
1273 /* Power up external GigaPHY through AX88178 GPIO pin */
1244 asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40); 1274 asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
1245 if ((le16_to_cpu(eeprom) >> 8) != 1) { 1275 if ((le16_to_cpu(eeprom) >> 8) != 1) {
1246 asix_write_gpio(dev, 0x003c, 30); 1276 asix_write_gpio(dev, 0x003c, 30);
@@ -1252,6 +1282,13 @@ static int ax88178_reset(struct usbnet *dev)
1252 asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30); 1282 asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
1253 } 1283 }
1254 1284
1285 /* Read PHYID register *AFTER* powering up PHY */
1286 phyid = asix_get_phyid(dev);
1287 dbg("PHYID=0x%08x", phyid);
1288
1289 /* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */
1290 asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL);
1291
1255 asix_sw_reset(dev, 0); 1292 asix_sw_reset(dev, 0);
1256 msleep(150); 1293 msleep(150);
1257 1294
@@ -1396,7 +1433,6 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
1396{ 1433{
1397 int ret; 1434 int ret;
1398 u8 buf[ETH_ALEN]; 1435 u8 buf[ETH_ALEN];
1399 u32 phyid;
1400 struct asix_data *data = (struct asix_data *)&dev->data; 1436 struct asix_data *data = (struct asix_data *)&dev->data;
1401 1437
1402 data->eeprom_len = AX88772_EEPROM_LEN; 1438 data->eeprom_len = AX88772_EEPROM_LEN;
@@ -1423,12 +1459,12 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
1423 dev->net->netdev_ops = &ax88178_netdev_ops; 1459 dev->net->netdev_ops = &ax88178_netdev_ops;
1424 dev->net->ethtool_ops = &ax88178_ethtool_ops; 1460 dev->net->ethtool_ops = &ax88178_ethtool_ops;
1425 1461
1426 phyid = asix_get_phyid(dev); 1462 /* Blink LEDS so users know driver saw dongle */
1427 dbg("PHYID=0x%08x", phyid); 1463 asix_sw_reset(dev, 0);
1464 msleep(150);
1428 1465
1429 ret = ax88178_reset(dev); 1466 asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
1430 if (ret < 0) 1467 msleep(150);
1431 return ret;
1432 1468
1433 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ 1469 /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
1434 if (dev->driver_info->flags & FLAG_FRAMING_AX) { 1470 if (dev->driver_info->flags & FLAG_FRAMING_AX) {
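Note: asix_get_phyid() above now retries the MII_PHYSID1 read because the PHY or its firmware may answer 0x0000/0xFFFF right after reset. A self-contained sketch of that bounded-retry pattern, with the MDIO access stubbed out (the stub and its return values are invented):

#include <stdio.h>

/* Pretend MDIO read: the PHY answers garbage for the first few reads. */
static int mdio_read_physid1(void)
{
	static int calls;

	return (++calls < 3) ? 0xFFFF : 0x0022;
}

static unsigned int read_phy_id(void)
{
	int reg = 0xFFFF;
	int i;

	/* Poll for the rare case the firmware or PHY isn't ready yet. */
	for (i = 0; i < 100; i++) {
		reg = mdio_read_physid1();
		if (reg != 0 && reg != 0xFFFF)
			break;
		/* a real driver would wait ~1 ms between attempts */
	}
	if (reg <= 0 || reg == 0xFFFF)
		return 0;	/* still unreadable: report "no PHY id" */

	return ((unsigned int)reg & 0xffff) << 16;
}

int main(void)
{
	printf("PHYID=0x%08x\n", read_phy_id());
	return 0;
}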
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c924ea2bce07..99ed6eb4dfaf 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -567,7 +567,7 @@ static const struct usb_device_id products [] = {
567{ 567{
568 USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, 568 USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
569 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 569 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
570 .driver_info = (unsigned long)&wwan_info, 570 .driver_info = 0,
571}, 571},
572 572
573/* 573/*
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index d43db32f9478..9c26c6390d69 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -144,10 +144,11 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
144 } 144 }
145 145
146 frame = (struct vl600_frame_hdr *) buf->data; 146 frame = (struct vl600_frame_hdr *) buf->data;
147 /* NOTE: Should check that frame->magic == 0x53544448? 147 /* Yes, check that frame->magic == 0x53544448 (or 0x44544d48),
148 * Otherwise if we receive garbage at the beginning of the frame 148 * otherwise we may run out of memory w/a bad packet */
149 * we may end up allocating a huge buffer and saving all the 149 if (ntohl(frame->magic) != 0x53544448 &&
150 * future incoming data into it. */ 150 ntohl(frame->magic) != 0x44544d48)
151 goto error;
151 152
152 if (buf->len < sizeof(*frame) || 153 if (buf->len < sizeof(*frame) ||
153 buf->len != le32_to_cpup(&frame->len)) { 154 buf->len != le32_to_cpup(&frame->len)) {
@@ -296,6 +297,11 @@ encapsulate:
296 * overwrite the remaining fields. 297 * overwrite the remaining fields.
297 */ 298 */
298 packet = (struct vl600_pkt_hdr *) skb->data; 299 packet = (struct vl600_pkt_hdr *) skb->data;
300 /* The VL600 wants IPv6 packets to have an IPv4 ethertype
301 * Since this modem only supports IPv4 and IPv6, just set all
302 * frames to 0x0800 (ETH_P_IP)
303 */
304 packet->h_proto = htons(ETH_P_IP);
299 memset(&packet->dummy, 0, sizeof(packet->dummy)); 305 memset(&packet->dummy, 0, sizeof(packet->dummy));
300 packet->len = cpu_to_le32(orig_len); 306 packet->len = cpu_to_le32(orig_len);
301 307
@@ -308,21 +314,12 @@ encapsulate:
308 if (skb->len < full_len) /* Pad */ 314 if (skb->len < full_len) /* Pad */
309 skb_put(skb, full_len - skb->len); 315 skb_put(skb, full_len - skb->len);
310 316
311 /* The VL600 wants IPv6 packets to have an IPv4 ethertype
312 * Check if this is an IPv6 packet, and set the ethertype
313 * to 0x800
314 */
315 if ((skb->data[sizeof(struct vl600_pkt_hdr *) + 0x22] & 0xf0) == 0x60) {
316 skb->data[sizeof(struct vl600_pkt_hdr *) + 0x20] = 0x08;
317 skb->data[sizeof(struct vl600_pkt_hdr *) + 0x21] = 0;
318 }
319
320 return skb; 317 return skb;
321} 318}
322 319
323static const struct driver_info vl600_info = { 320static const struct driver_info vl600_info = {
324 .description = "LG VL600 modem", 321 .description = "LG VL600 modem",
325 .flags = FLAG_ETHER | FLAG_RX_ASSEMBLE, 322 .flags = FLAG_RX_ASSEMBLE | FLAG_WWAN,
326 .bind = vl600_bind, 323 .bind = vl600_bind,
327 .unbind = vl600_unbind, 324 .unbind = vl600_unbind,
328 .status = usbnet_cdc_status, 325 .status = usbnet_cdc_status,
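Note: the vl600 rx_fixup() change validates the frame magic before trusting the attached length field, so garbage at the start of a transfer can no longer grow the reassembly buffer without bound. A portable sketch of the check, assuming the magic is carried big-endian on the wire (the buffer layout here is simplified and invented):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAGIC_HDR 0x53544448u
#define MAGIC_ALT 0x44544d48u

/* Reject the buffer early when the magic does not match, so a corrupt
 * length field can never make us grow a huge reassembly buffer. */
static int frame_magic_ok(const uint8_t *buf, size_t buflen)
{
	uint32_t magic;

	if (buflen < 8)		/* too short for magic + length */
		return 0;

	magic = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
		((uint32_t)buf[2] << 8)  |  (uint32_t)buf[3];

	return magic == MAGIC_HDR || magic == MAGIC_ALT;
}

int main(void)
{
	uint8_t good[8] = { 0x53, 0x54, 0x44, 0x48, 0x00, 0x00, 0x00, 0x08 };
	uint8_t bad[8]  = { 0xde, 0xad, 0xbe, 0xef, 0xff, 0xff, 0xff, 0xff };

	printf("good=%d bad=%d\n",
	       frame_magic_ok(good, sizeof(good)),
	       frame_magic_ok(bad, sizeof(bad)));
	return 0;
}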
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 22a7cf951e72..a5b9b12ef268 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -51,6 +51,7 @@
51#define USB_VENDOR_ID_SMSC (0x0424) 51#define USB_VENDOR_ID_SMSC (0x0424)
52#define USB_PRODUCT_ID_LAN7500 (0x7500) 52#define USB_PRODUCT_ID_LAN7500 (0x7500)
53#define USB_PRODUCT_ID_LAN7505 (0x7505) 53#define USB_PRODUCT_ID_LAN7505 (0x7505)
54#define RXW_PADDING 2
54 55
55#define check_warn(ret, fmt, args...) \ 56#define check_warn(ret, fmt, args...) \
56 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); }) 57 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -1088,13 +1089,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1088 1089
1089 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); 1090 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
1090 le32_to_cpus(&rx_cmd_b); 1091 le32_to_cpus(&rx_cmd_b);
1091 skb_pull(skb, 4 + NET_IP_ALIGN); 1092 skb_pull(skb, 4 + RXW_PADDING);
1092 1093
1093 packet = skb->data; 1094 packet = skb->data;
1094 1095
1095 /* get the packet length */ 1096 /* get the packet length */
1096 size = (rx_cmd_a & RX_CMD_A_LEN) - NET_IP_ALIGN; 1097 size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
1097 align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4; 1098 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
1098 1099
1099 if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { 1100 if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
1100 netif_dbg(dev, rx_err, dev->net, 1101 netif_dbg(dev, rx_err, dev->net,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2f91acccb7db..8873c6e6fb96 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1827,7 +1827,8 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
1827 } 1827 }
1828 1828
1829 /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */ 1829 /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
1830 REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); 1830 if (AR_SREV_9300_20_OR_LATER(ah))
1831 REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
1831} 1832}
1832 1833
1833/* 1834/*
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 93fbe6f40898..d2348a5a7809 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -286,7 +286,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
286 ath_start_ani(common); 286 ath_start_ani(common);
287 } 287 }
288 288
289 if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) { 289 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
290 struct ath_hw_antcomb_conf div_ant_conf; 290 struct ath_hw_antcomb_conf div_ant_conf;
291 u8 lna_conf; 291 u8 lna_conf;
292 292
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 85fa9cc73502..65ecb5bab25a 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -254,6 +254,8 @@ ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
254 int r; 254 int r;
255 255
256 sband = wiphy->bands[IEEE80211_BAND_2GHZ]; 256 sband = wiphy->bands[IEEE80211_BAND_2GHZ];
257 if (!sband)
258 return;
257 259
258 /* 260 /*
259 * If no country IE has been received always enable active scan 261 * If no country IE has been received always enable active scan
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 58ea0e5fabfd..5f77cbe0b6aa 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -175,6 +175,7 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
175 } 175 }
176} 176}
177 177
178/* TODO: verify if needed for SSLPN or LCN */
178static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate) 179static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate)
179{ 180{
180 const struct b43_phy *phy = &dev->phy; 181 const struct b43_phy *phy = &dev->phy;
@@ -256,6 +257,9 @@ int b43_generate_txhdr(struct b43_wldev *dev,
256 unsigned int plcp_fragment_len; 257 unsigned int plcp_fragment_len;
257 u32 mac_ctl = 0; 258 u32 mac_ctl = 0;
258 u16 phy_ctl = 0; 259 u16 phy_ctl = 0;
260 bool fill_phy_ctl1 = (phy->type == B43_PHYTYPE_LP ||
261 phy->type == B43_PHYTYPE_N ||
262 phy->type == B43_PHYTYPE_HT);
259 u8 extra_ft = 0; 263 u8 extra_ft = 0;
260 struct ieee80211_rate *txrate; 264 struct ieee80211_rate *txrate;
261 struct ieee80211_tx_rate *rates; 265 struct ieee80211_tx_rate *rates;
@@ -531,7 +535,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
531 extra_ft |= B43_TXH_EFT_RTSFB_CCK; 535 extra_ft |= B43_TXH_EFT_RTSFB_CCK;
532 536
533 if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS && 537 if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS &&
534 phy->type == B43_PHYTYPE_N) { 538 fill_phy_ctl1) {
535 txhdr->phy_ctl1_rts = cpu_to_le16( 539 txhdr->phy_ctl1_rts = cpu_to_le16(
536 b43_generate_tx_phy_ctl1(dev, rts_rate)); 540 b43_generate_tx_phy_ctl1(dev, rts_rate));
537 txhdr->phy_ctl1_rts_fb = cpu_to_le16( 541 txhdr->phy_ctl1_rts_fb = cpu_to_le16(
@@ -552,7 +556,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
552 break; 556 break;
553 } 557 }
554 558
555 if (phy->type == B43_PHYTYPE_N) { 559 if (fill_phy_ctl1) {
556 txhdr->phy_ctl1 = 560 txhdr->phy_ctl1 =
557 cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate)); 561 cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate));
558 txhdr->phy_ctl1_fb = 562 txhdr->phy_ctl1_fb =
@@ -736,7 +740,14 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
736 740
737 /* Link quality statistics */ 741 /* Link quality statistics */
738 switch (chanstat & B43_RX_CHAN_PHYTYPE) { 742 switch (chanstat & B43_RX_CHAN_PHYTYPE) {
743 case B43_PHYTYPE_HT:
744 /* TODO: is max the right choice? */
745 status.signal = max_t(__s8,
746 max(rxhdr->phy_ht_power0, rxhdr->phy_ht_power1),
747 rxhdr->phy_ht_power2);
748 break;
739 case B43_PHYTYPE_N: 749 case B43_PHYTYPE_N:
750 /* Broadcom has code for min and avg, but always uses max */
740 if (rxhdr->power0 == 16 || rxhdr->power0 == 32) 751 if (rxhdr->power0 == 16 || rxhdr->power0 == 32)
741 status.signal = max(rxhdr->power1, rxhdr->power2); 752 status.signal = max(rxhdr->power1, rxhdr->power2);
742 else 753 else
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index 16c514d54afa..98d90747836a 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -249,6 +249,12 @@ struct b43_rxhdr_fw4 {
249 } __packed; 249 } __packed;
250 } __packed; 250 } __packed;
251 union { 251 union {
252 /* HT-PHY */
253 struct {
254 PAD_BYTES(1);
255 __s8 phy_ht_power0;
256 } __packed;
257
252 /* RSSI for N-PHYs */ 258 /* RSSI for N-PHYs */
253 struct { 259 struct {
254 __s8 power2; 260 __s8 power2;
@@ -257,7 +263,15 @@ struct b43_rxhdr_fw4 {
257 263
258 __le16 phy_status2; /* PHY RX Status 2 */ 264 __le16 phy_status2; /* PHY RX Status 2 */
259 } __packed; 265 } __packed;
260 __le16 phy_status3; /* PHY RX Status 3 */ 266 union {
267 /* HT-PHY */
268 struct {
269 __s8 phy_ht_power1;
270 __s8 phy_ht_power2;
271 } __packed;
272
273 __le16 phy_status3; /* PHY RX Status 3 */
274 } __packed;
261 union { 275 union {
262 /* Tested with 598.314, 644.1001 and 666.2 */ 276 /* Tested with 598.314, 644.1001 and 666.2 */
263 struct { 277 struct {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index b56a30297c26..6ebec8f42846 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -358,13 +358,14 @@ static uint nrxdactive(struct dma_info *di, uint h, uint t)
358 358
359static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) 359static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
360{ 360{
361 uint dmactrlflags = di->dma.dmactrlflags; 361 uint dmactrlflags;
362 362
363 if (di == NULL) { 363 if (di == NULL) {
364 DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name)); 364 DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
365 return 0; 365 return 0;
366 } 366 }
367 367
368 dmactrlflags = di->dma.dmactrlflags;
368 dmactrlflags &= ~mask; 369 dmactrlflags &= ~mask;
369 dmactrlflags |= flags; 370 dmactrlflags |= flags;
370 371
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index e12b48c2cff6..dd008b0e6417 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -191,6 +191,7 @@ static struct iwl_base_params iwl1000_base_params = {
191 .chain_noise_scale = 1000, 191 .chain_noise_scale = 1000,
192 .wd_timeout = IWL_DEF_WD_TIMEOUT, 192 .wd_timeout = IWL_DEF_WD_TIMEOUT,
193 .max_event_log_size = 128, 193 .max_event_log_size = 128,
194 .wd_disable = true,
194}; 195};
195static struct iwl_ht_params iwl1000_ht_params = { 196static struct iwl_ht_params iwl1000_ht_params = {
196 .ht_greenfield_support = true, 197 .ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index c511c98a89a8..f55fb2d1af52 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -364,6 +364,7 @@ static struct iwl_base_params iwl5000_base_params = {
364 .wd_timeout = IWL_LONG_WD_TIMEOUT, 364 .wd_timeout = IWL_LONG_WD_TIMEOUT,
365 .max_event_log_size = 512, 365 .max_event_log_size = 512,
366 .no_idle_support = true, 366 .no_idle_support = true,
367 .wd_disable = true,
367}; 368};
368static struct iwl_ht_params iwl5000_ht_params = { 369static struct iwl_ht_params iwl5000_ht_params = {
369 .ht_greenfield_support = true, 370 .ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 58a381c01c89..a7a6def40d05 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -528,6 +528,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
528 return 0; 528 return 0;
529} 529}
530 530
531void iwlagn_config_ht40(struct ieee80211_conf *conf,
532 struct iwl_rxon_context *ctx)
533{
534 if (conf_is_ht40_minus(conf)) {
535 ctx->ht.extension_chan_offset =
536 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
537 ctx->ht.is_40mhz = true;
538 } else if (conf_is_ht40_plus(conf)) {
539 ctx->ht.extension_chan_offset =
540 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
541 ctx->ht.is_40mhz = true;
542 } else {
543 ctx->ht.extension_chan_offset =
544 IEEE80211_HT_PARAM_CHA_SEC_NONE;
545 ctx->ht.is_40mhz = false;
546 }
547}
548
531int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) 549int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
532{ 550{
533 struct iwl_priv *priv = hw->priv; 551 struct iwl_priv *priv = hw->priv;
@@ -586,19 +604,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
586 ctx->ht.enabled = conf_is_ht(conf); 604 ctx->ht.enabled = conf_is_ht(conf);
587 605
588 if (ctx->ht.enabled) { 606 if (ctx->ht.enabled) {
589 if (conf_is_ht40_minus(conf)) { 607 /* if HT40 is used, it should not change
590 ctx->ht.extension_chan_offset = 608 * after associated except channel switch */
591 IEEE80211_HT_PARAM_CHA_SEC_BELOW; 609 if (iwl_is_associated_ctx(ctx) &&
592 ctx->ht.is_40mhz = true; 610 !ctx->ht.is_40mhz)
593 } else if (conf_is_ht40_plus(conf)) { 611 iwlagn_config_ht40(conf, ctx);
594 ctx->ht.extension_chan_offset =
595 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
596 ctx->ht.is_40mhz = true;
597 } else {
598 ctx->ht.extension_chan_offset =
599 IEEE80211_HT_PARAM_CHA_SEC_NONE;
600 ctx->ht.is_40mhz = false;
601 }
602 } else 612 } else
603 ctx->ht.is_40mhz = false; 613 ctx->ht.is_40mhz = false;
604 614
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index ed6283623932..4b2aa1da0953 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -1268,9 +1268,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
1268 1268
1269 switch (keyconf->cipher) { 1269 switch (keyconf->cipher) {
1270 case WLAN_CIPHER_SUITE_TKIP: 1270 case WLAN_CIPHER_SUITE_TKIP:
1271 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1272 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1273
1274 if (sta) 1271 if (sta)
1275 addr = sta->addr; 1272 addr = sta->addr;
1276 else /* station mode case only */ 1273 else /* station mode case only */
@@ -1283,8 +1280,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
1283 seq.tkip.iv32, p1k, CMD_SYNC); 1280 seq.tkip.iv32, p1k, CMD_SYNC);
1284 break; 1281 break;
1285 case WLAN_CIPHER_SUITE_CCMP: 1282 case WLAN_CIPHER_SUITE_CCMP:
1286 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1287 /* fall through */
1288 case WLAN_CIPHER_SUITE_WEP40: 1283 case WLAN_CIPHER_SUITE_WEP40:
1289 case WLAN_CIPHER_SUITE_WEP104: 1284 case WLAN_CIPHER_SUITE_WEP104:
1290 ret = iwlagn_send_sta_key(priv, keyconf, sta_id, 1285 ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index ccba69b7f8a7..bacc06c95e7a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2316,6 +2316,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2316 return -EOPNOTSUPP; 2316 return -EOPNOTSUPP;
2317 } 2317 }
2318 2318
2319 switch (key->cipher) {
2320 case WLAN_CIPHER_SUITE_TKIP:
2321 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2322 /* fall through */
2323 case WLAN_CIPHER_SUITE_CCMP:
2324 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2325 break;
2326 default:
2327 break;
2328 }
2329
2319 /* 2330 /*
2320 * We could program these keys into the hardware as well, but we 2331 * We could program these keys into the hardware as well, but we
2321 * don't expect much multicast traffic in IBSS and having keys 2332 * don't expect much multicast traffic in IBSS and having keys
@@ -2599,21 +2610,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
2599 2610
2600 /* Configure HT40 channels */ 2611 /* Configure HT40 channels */
2601 ctx->ht.enabled = conf_is_ht(conf); 2612 ctx->ht.enabled = conf_is_ht(conf);
2602 if (ctx->ht.enabled) { 2613 if (ctx->ht.enabled)
2603 if (conf_is_ht40_minus(conf)) { 2614 iwlagn_config_ht40(conf, ctx);
2604 ctx->ht.extension_chan_offset = 2615 else
2605 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2606 ctx->ht.is_40mhz = true;
2607 } else if (conf_is_ht40_plus(conf)) {
2608 ctx->ht.extension_chan_offset =
2609 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2610 ctx->ht.is_40mhz = true;
2611 } else {
2612 ctx->ht.extension_chan_offset =
2613 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2614 ctx->ht.is_40mhz = false;
2615 }
2616 } else
2617 ctx->ht.is_40mhz = false; 2616 ctx->ht.is_40mhz = false;
2618 2617
2619 if ((le16_to_cpu(ctx->staging.channel) != ch)) 2618 if ((le16_to_cpu(ctx->staging.channel) != ch))
@@ -3499,9 +3498,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
3499module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO); 3498module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
3500MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])"); 3499MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
3501 3500
3502module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO); 3501module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
3503MODULE_PARM_DESC(wd_disable, 3502MODULE_PARM_DESC(wd_disable,
3504 "Disable stuck queue watchdog timer (default: 0 [enabled])"); 3503 "Disable stuck queue watchdog timer 0=system default, "
3504 "1=disable, 2=enable (default: 0)");
3505 3505
3506/* 3506/*
3507 * set bt_coex_active to true, uCode will do kill/defer 3507 * set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 5b936ec1a541..3856abaea507 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -86,6 +86,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
86 struct ieee80211_vif *vif, 86 struct ieee80211_vif *vif,
87 struct ieee80211_bss_conf *bss_conf, 87 struct ieee80211_bss_conf *bss_conf,
88 u32 changes); 88 u32 changes);
89void iwlagn_config_ht40(struct ieee80211_conf *conf,
90 struct iwl_rxon_context *ctx);
89 91
90/* uCode */ 92/* uCode */
91int iwlagn_rx_calib_result(struct iwl_priv *priv, 93int iwlagn_rx_calib_result(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 001fdf140abb..fcf54160e4ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1810,11 +1810,23 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
1810{ 1810{
1811 unsigned int timeout = priv->cfg->base_params->wd_timeout; 1811 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1812 1812
1813 if (timeout && !iwlagn_mod_params.wd_disable) 1813 if (!iwlagn_mod_params.wd_disable) {
1814 mod_timer(&priv->watchdog, 1814 /* use system default */
1815 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); 1815 if (timeout && !priv->cfg->base_params->wd_disable)
1816 else 1816 mod_timer(&priv->watchdog,
1817 del_timer(&priv->watchdog); 1817 jiffies +
1818 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1819 else
1820 del_timer(&priv->watchdog);
1821 } else {
1822 /* module parameter overwrite default configuration */
1823 if (timeout && iwlagn_mod_params.wd_disable == 2)
1824 mod_timer(&priv->watchdog,
1825 jiffies +
1826 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1827 else
1828 del_timer(&priv->watchdog);
1829 }
1818} 1830}
1819 1831
1820/** 1832/**
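Note: iwl_setup_watchdog() above turns wd_disable into a three-way policy: 0 follows the per-device default added in iwl_base_params, 1 forces the watchdog off, 2 forces it on. A compact sketch of that decision, with invented parameter names:

#include <stdbool.h>
#include <stdio.h>

/* wd_disable: 0 = follow the per-device default, 1 = force off, 2 = force on */
static bool watchdog_should_run(int wd_disable, unsigned int timeout_ms,
				bool device_default_disabled)
{
	if (!timeout_ms)
		return false;		/* nothing to arm */
	if (wd_disable == 0)
		return !device_default_disabled;
	return wd_disable == 2;
}

int main(void)
{
	printf("%d\n", watchdog_should_run(0, 2000, true));   /* default: off */
	printf("%d\n", watchdog_should_run(2, 2000, true));   /* forced on */
	printf("%d\n", watchdog_should_run(1, 2000, false));  /* forced off */
	return 0;
}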
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 137da3380704..f2fc288f3dd3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -113,6 +113,7 @@ struct iwl_lib_ops {
113 * @shadow_reg_enable: HW shadhow register bit 113 * @shadow_reg_enable: HW shadhow register bit
114 * @no_idle_support: do not support idle mode 114 * @no_idle_support: do not support idle mode
115 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up 115 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
116 * wd_disable: disable watchdog timer
116 */ 117 */
117struct iwl_base_params { 118struct iwl_base_params {
118 int eeprom_size; 119 int eeprom_size;
@@ -134,6 +135,7 @@ struct iwl_base_params {
134 const bool shadow_reg_enable; 135 const bool shadow_reg_enable;
135 const bool no_idle_support; 136 const bool no_idle_support;
136 const bool hd_v2; 137 const bool hd_v2;
138 const bool wd_disable;
137}; 139};
138/* 140/*
139 * @advanced_bt_coexist: support advanced bt coexist 141 * @advanced_bt_coexist: support advanced bt coexist
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index 1f7a93c67c45..14eaf37ce3b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -120,7 +120,7 @@ extern struct iwl_mod_params iwlagn_mod_params;
120 * @restart_fw: restart firmware, default = 1 120 * @restart_fw: restart firmware, default = 1
121 * @plcp_check: enable plcp health check, default = true 121 * @plcp_check: enable plcp health check, default = true
122 * @ack_check: disable ack health check, default = false 122 * @ack_check: disable ack health check, default = false
123 * @wd_disable: enable stuck queue check, default = false 123 * @wd_disable: enable stuck queue check, default = 0
124 * @bt_coex_active: enable bt coex, default = true 124 * @bt_coex_active: enable bt coex, default = true
125 * @led_mode: system default, default = 0 125 * @led_mode: system default, default = 0
126 * @no_sleep_autoadjust: disable autoadjust, default = true 126 * @no_sleep_autoadjust: disable autoadjust, default = true
@@ -141,7 +141,7 @@ struct iwl_mod_params {
141 int restart_fw; 141 int restart_fw;
142 bool plcp_check; 142 bool plcp_check;
143 bool ack_check; 143 bool ack_check;
144 bool wd_disable; 144 int wd_disable;
145 bool bt_coex_active; 145 bool bt_coex_active;
146 int led_mode; 146 int led_mode;
147 bool no_sleep_autoadjust; 147 bool no_sleep_autoadjust;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index da3411057afc..ce918980e977 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -990,29 +990,16 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
990 return 0; 990 return 0;
991} 991}
992 992
993static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans) 993static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
994{ 994{
995 unsigned long flags; 995 unsigned long flags;
996 struct iwl_trans_pcie *trans_pcie = 996 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
997 IWL_TRANS_GET_PCIE_TRANS(trans);
998 997
998 /* tell the device to stop sending interrupts */
999 spin_lock_irqsave(&trans->shrd->lock, flags); 999 spin_lock_irqsave(&trans->shrd->lock, flags);
1000 iwl_disable_interrupts(trans); 1000 iwl_disable_interrupts(trans);
1001 spin_unlock_irqrestore(&trans->shrd->lock, flags); 1001 spin_unlock_irqrestore(&trans->shrd->lock, flags);
1002 1002
1003 /* wait to make sure we flush pending tasklet*/
1004 synchronize_irq(bus(trans)->irq);
1005 tasklet_kill(&trans_pcie->irq_tasklet);
1006}
1007
1008static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1009{
1010 /* stop and reset the on-board processor */
1011 iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
1012
1013 /* tell the device to stop sending interrupts */
1014 iwl_trans_pcie_disable_sync_irq(trans);
1015
1016 /* device going down, Stop using ICT table */ 1003 /* device going down, Stop using ICT table */
1017 iwl_disable_ict(trans); 1004 iwl_disable_ict(trans);
1018 1005
@@ -1039,6 +1026,20 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1039 1026
1040 /* Stop the device, and put it in low power state */ 1027 /* Stop the device, and put it in low power state */
1041 iwl_apm_stop(priv(trans)); 1028 iwl_apm_stop(priv(trans));
1029
1030 /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1031 * Clean again the interrupt here
1032 */
1033 spin_lock_irqsave(&trans->shrd->lock, flags);
1034 iwl_disable_interrupts(trans);
1035 spin_unlock_irqrestore(&trans->shrd->lock, flags);
1036
1037 /* wait to make sure we flush pending tasklet*/
1038 synchronize_irq(bus(trans)->irq);
1039 tasklet_kill(&trans_pcie->irq_tasklet);
1040
1041 /* stop and reset the on-board processor */
1042 iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
1042} 1043}
1043 1044
1044static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 1045static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 4fcd653bddc4..a7f1ab28940d 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -634,7 +634,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
634 if (channel && 634 if (channel &&
635 !(channel->flags & IEEE80211_CHAN_DISABLED)) 635 !(channel->flags & IEEE80211_CHAN_DISABLED))
636 cfg80211_inform_bss(wiphy, channel, 636 cfg80211_inform_bss(wiphy, channel,
637 bssid, le64_to_cpu(*(__le64 *)tsfdesc), 637 bssid, get_unaligned_le64(tsfdesc),
638 capa, intvl, ie, ielen, 638 capa, intvl, ie, ielen,
639 LBS_SCAN_RSSI_TO_MBM(rssi), 639 LBS_SCAN_RSSI_TO_MBM(rssi),
640 GFP_KERNEL); 640 GFP_KERNEL);
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 11b69b300dc0..728baa445259 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -995,6 +995,7 @@ static int if_spi_host_to_card(struct lbs_private *priv,
995 spin_unlock_irqrestore(&card->buffer_lock, flags); 995 spin_unlock_irqrestore(&card->buffer_lock, flags);
996 break; 996 break;
997 default: 997 default:
998 kfree(packet);
998 netdev_err(priv->dev, "can't transfer buffer of type %d\n", 999 netdev_err(priv->dev, "can't transfer buffer of type %d\n",
999 type); 1000 type);
1000 err = -EINVAL; 1001 err = -EINVAL;
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index dae8dbb24a03..8d3ab378662b 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -819,8 +819,10 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
819 wildcard_ssid_tlv->header.len = cpu_to_le16( 819 wildcard_ssid_tlv->header.len = cpu_to_le16(
820 (u16) (ssid_len + sizeof(wildcard_ssid_tlv-> 820 (u16) (ssid_len + sizeof(wildcard_ssid_tlv->
821 max_ssid_length))); 821 max_ssid_length)));
822 wildcard_ssid_tlv->max_ssid_length = 822
823 user_scan_in->ssid_list[ssid_idx].max_len; 823 /* max_ssid_length = 0 tells firmware to perform
824 specific scan for the SSID filled */
825 wildcard_ssid_tlv->max_ssid_length = 0;
824 826
825 memcpy(wildcard_ssid_tlv->ssid, 827 memcpy(wildcard_ssid_tlv->ssid,
826 user_scan_in->ssid_list[ssid_idx].ssid, 828 user_scan_in->ssid_list[ssid_idx].ssid,
@@ -1469,7 +1471,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
1469 s32 rssi, const u8 *ie_buf, size_t ie_len, 1471 s32 rssi, const u8 *ie_buf, size_t ie_len,
1470 u16 beacon_period, u16 cap_info_bitmap, u8 band) 1472 u16 beacon_period, u16 cap_info_bitmap, u8 band)
1471{ 1473{
1472 struct mwifiex_bssdescriptor *bss_desc = NULL; 1474 struct mwifiex_bssdescriptor *bss_desc;
1473 int ret; 1475 int ret;
1474 unsigned long flags; 1476 unsigned long flags;
1475 u8 *beacon_ie; 1477 u8 *beacon_ie;
@@ -1484,6 +1486,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
1484 1486
1485 beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL); 1487 beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL);
1486 if (!beacon_ie) { 1488 if (!beacon_ie) {
1489 kfree(bss_desc);
1487 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n"); 1490 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
1488 return -ENOMEM; 1491 return -ENOMEM;
1489 } 1492 }
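Note: the mwifiex_update_curr_bss_params() fix frees the already-allocated bss_desc when the later kmemdup() of the beacon IEs fails, instead of leaking it. A plain-C sketch of that error path (malloc/free stand in for the kernel allocators; names are invented):

#include <stdlib.h>
#include <string.h>

struct bss_desc { char ssid[32]; };

static int update_bss(const void *ie_buf, size_t ie_len)
{
	struct bss_desc *desc;
	void *beacon_ie;

	desc = calloc(1, sizeof(*desc));
	if (!desc)
		return -1;

	beacon_ie = malloc(ie_len);		/* stands in for kmemdup() */
	if (!beacon_ie) {
		free(desc);			/* don't leak the first allocation */
		return -1;
	}
	memcpy(beacon_ie, ie_buf, ie_len);

	/* ... use desc and beacon_ie ... */
	free(beacon_ie);
	free(desc);
	return 0;
}

int main(void)
{
	char ie[8] = "abcdefg";

	return update_bss(ie, sizeof(ie)) ? 1 : 0;
}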
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index f18df82eeb92..78d0d6988553 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -588,8 +588,6 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
588 588
589 WARN_ON(priv->fw_state != FW_STATE_READY); 589 WARN_ON(priv->fw_state != FW_STATE_READY);
590 590
591 cancel_work_sync(&priv->work);
592
593 p54spi_power_off(priv); 591 p54spi_power_off(priv);
594 spin_lock_irqsave(&priv->tx_lock, flags); 592 spin_lock_irqsave(&priv->tx_lock, flags);
595 INIT_LIST_HEAD(&priv->tx_pending); 593 INIT_LIST_HEAD(&priv->tx_pending);
@@ -597,6 +595,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
597 595
598 priv->fw_state = FW_STATE_OFF; 596 priv->fw_state = FW_STATE_OFF;
599 mutex_unlock(&priv->mutex); 597 mutex_unlock(&priv->mutex);
598
599 cancel_work_sync(&priv->work);
600} 600}
601 601
602static int __devinit p54spi_probe(struct spi_device *spi) 602static int __devinit p54spi_probe(struct spi_device *spi)
@@ -656,6 +656,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
656 init_completion(&priv->fw_comp); 656 init_completion(&priv->fw_comp);
657 INIT_LIST_HEAD(&priv->tx_pending); 657 INIT_LIST_HEAD(&priv->tx_pending);
658 mutex_init(&priv->mutex); 658 mutex_init(&priv->mutex);
659 spin_lock_init(&priv->tx_lock);
659 SET_IEEE80211_DEV(hw, &spi->dev); 660 SET_IEEE80211_DEV(hw, &spi->dev);
660 priv->common.open = p54spi_op_start; 661 priv->common.open = p54spi_op_start;
661 priv->common.stop = p54spi_op_stop; 662 priv->common.stop = p54spi_op_stop;
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index d97a2caf582b..bc2ba80c47bb 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
778 dwrq->flags = 0; 778 dwrq->flags = 0;
779 dwrq->length = 0; 779 dwrq->length = 0;
780 } 780 }
781 essid->octets[essid->length] = '\0'; 781 essid->octets[dwrq->length] = '\0';
782 memcpy(extra, essid->octets, dwrq->length); 782 memcpy(extra, essid->octets, dwrq->length);
783 kfree(essid); 783 kfree(essid);
784 784
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 3f183a15186e..1ba079dffb11 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3771,7 +3771,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
3771 /* Apparently the data is read from end to start */ 3771 /* Apparently the data is read from end to start */
3772 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg); 3772 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
3773 /* The returned value is in CPU order, but eeprom is le */ 3773 /* The returned value is in CPU order, but eeprom is le */
3774 rt2x00dev->eeprom[i] = cpu_to_le32(reg); 3774 *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
3775 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg); 3775 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
3776 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg); 3776 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
3777 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg); 3777 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index f1565792f270..377876315b8d 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -919,6 +919,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
919 { USB_DEVICE(0x050d, 0x935b) }, 919 { USB_DEVICE(0x050d, 0x935b) },
920 /* Buffalo */ 920 /* Buffalo */
921 { USB_DEVICE(0x0411, 0x00e8) }, 921 { USB_DEVICE(0x0411, 0x00e8) },
922 { USB_DEVICE(0x0411, 0x0158) },
922 { USB_DEVICE(0x0411, 0x016f) }, 923 { USB_DEVICE(0x0411, 0x016f) },
923 { USB_DEVICE(0x0411, 0x01a2) }, 924 { USB_DEVICE(0x0411, 0x01a2) },
924 /* Corega */ 925 /* Corega */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 2ec5c00235e6..99ff12d0c29d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -943,6 +943,7 @@ struct rt2x00_dev {
943 * Powersaving work 943 * Powersaving work
944 */ 944 */
945 struct delayed_work autowakeup_work; 945 struct delayed_work autowakeup_work;
946 struct work_struct sleep_work;
946 947
947 /* 948 /*
948 * Data queue arrays for RX, TX, Beacon and ATIM. 949 * Data queue arrays for RX, TX, Beacon and ATIM.
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index e1fb2a8569be..edd317fa7c0a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -465,6 +465,23 @@ static u8 *rt2x00lib_find_ie(u8 *data, unsigned int len, u8 ie)
465 return NULL; 465 return NULL;
466} 466}
467 467
468static void rt2x00lib_sleep(struct work_struct *work)
469{
470 struct rt2x00_dev *rt2x00dev =
471 container_of(work, struct rt2x00_dev, sleep_work);
472
473 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
474 return;
475
476 /*
 477 * Check again if powersaving is enabled, to prevent races from delayed
478 * work execution.
479 */
480 if (!test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags))
481 rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf,
482 IEEE80211_CONF_CHANGE_PS);
483}
484
468static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev, 485static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
469 struct sk_buff *skb, 486 struct sk_buff *skb,
470 struct rxdone_entry_desc *rxdesc) 487 struct rxdone_entry_desc *rxdesc)
@@ -512,8 +529,7 @@ static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
512 cam |= (tim_ie->bitmap_ctrl & 0x01); 529 cam |= (tim_ie->bitmap_ctrl & 0x01);
513 530
514 if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags)) 531 if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags))
515 rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf, 532 queue_work(rt2x00dev->workqueue, &rt2x00dev->sleep_work);
516 IEEE80211_CONF_CHANGE_PS);
517} 533}
518 534
519static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev, 535static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
@@ -1141,6 +1157,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1141 1157
1142 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); 1158 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
1143 INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup); 1159 INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup);
1160 INIT_WORK(&rt2x00dev->sleep_work, rt2x00lib_sleep);
1144 1161
1145 /* 1162 /*
1146 * Let the driver probe the device to detect the capabilities. 1163 * Let the driver probe the device to detect the capabilities.
@@ -1197,6 +1214,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1197 */ 1214 */
1198 cancel_work_sync(&rt2x00dev->intf_work); 1215 cancel_work_sync(&rt2x00dev->intf_work);
1199 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); 1216 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
1217 cancel_work_sync(&rt2x00dev->sleep_work);
1200 if (rt2x00_is_usb(rt2x00dev)) { 1218 if (rt2x00_is_usb(rt2x00dev)) {
1201 del_timer_sync(&rt2x00dev->txstatus_timer); 1219 del_timer_sync(&rt2x00dev->txstatus_timer);
1202 cancel_work_sync(&rt2x00dev->rxdone_work); 1220 cancel_work_sync(&rt2x00dev->rxdone_work);
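The rt2x00 hunks above defer the power-save reconfiguration from the RX completion path to a work item, re-checking the flag in process context before touching the hardware. A minimal sketch of that pattern, assuming a hypothetical struct my_dev with illustrative flag bits and a private workqueue (none of these names are rt2x00 API):

#include <linux/workqueue.h>
#include <linux/bitops.h>

#define MY_DEVICE_PRESENT	0
#define MY_POWERSAVING		1

struct my_dev {
	unsigned long flags;
	struct workqueue_struct *wq;
	struct work_struct sleep_work;
};

/* Runs in process context, so sleeping configuration calls are allowed. */
static void my_sleep_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, sleep_work);

	if (!test_bit(MY_DEVICE_PRESENT, &dev->flags))
		return;

	/* Re-check: powersaving may have been toggled since the work was queued. */
	if (test_bit(MY_POWERSAVING, &dev->flags))
		return;

	/* ... apply the power-save configuration here ... */
}

/* Called from the atomic RX path: only queue the work, never sleep here. */
static void my_rx_check_ps(struct my_dev *dev, bool ap_allows_sleep)
{
	if (ap_allows_sleep && !test_bit(MY_POWERSAVING, &dev->flags))
		queue_work(dev->wq, &dev->sleep_work);
}

Probe would pair this with INIT_WORK(&dev->sleep_work, my_sleep_work) and teardown with cancel_work_sync(&dev->sleep_work), mirroring the probe and remove hunks above.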
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index db5262844543..55c8e50f45fd 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -395,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
395 if (mac->link_state != MAC80211_LINKED) 395 if (mac->link_state != MAC80211_LINKED)
396 return; 396 return;
397 397
398 spin_lock(&rtlpriv->locks.lps_lock); 398 spin_lock_irq(&rtlpriv->locks.lps_lock);
399 399
400 /* Idle for a while if we connect to AP a while ago. */ 400 /* Idle for a while if we connect to AP a while ago. */
401 if (mac->cnt_after_linked >= 2) { 401 if (mac->cnt_after_linked >= 2) {
@@ -407,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
407 } 407 }
408 } 408 }
409 409
410 spin_unlock(&rtlpriv->locks.lps_lock); 410 spin_unlock_irq(&rtlpriv->locks.lps_lock);
411} 411}
412 412
413/*Leave the leisure power save mode.*/ 413/*Leave the leisure power save mode.*/
@@ -416,8 +416,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
416 struct rtl_priv *rtlpriv = rtl_priv(hw); 416 struct rtl_priv *rtlpriv = rtl_priv(hw);
417 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 417 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
418 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 418 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
419 unsigned long flags;
419 420
420 spin_lock(&rtlpriv->locks.lps_lock); 421 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
421 422
422 if (ppsc->fwctrl_lps) { 423 if (ppsc->fwctrl_lps) {
423 if (ppsc->dot11_psmode != EACTIVE) { 424 if (ppsc->dot11_psmode != EACTIVE) {
@@ -438,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
438 rtl_lps_set_psmode(hw, EACTIVE); 439 rtl_lps_set_psmode(hw, EACTIVE);
439 } 440 }
440 } 441 }
441 spin_unlock(&rtlpriv->locks.lps_lock); 442 spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
442} 443}
443 444
444/* For sw LPS*/ 445/* For sw LPS*/
@@ -539,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
539 RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); 540 RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
540 } 541 }
541 542
542 spin_lock(&rtlpriv->locks.lps_lock); 543 spin_lock_irq(&rtlpriv->locks.lps_lock);
543 rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS); 544 rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
544 spin_unlock(&rtlpriv->locks.lps_lock); 545 spin_unlock_irq(&rtlpriv->locks.lps_lock);
545} 546}
546 547
547void rtl_swlps_rfon_wq_callback(void *data) 548void rtl_swlps_rfon_wq_callback(void *data)
@@ -574,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
574 if (rtlpriv->link_info.busytraffic) 575 if (rtlpriv->link_info.busytraffic)
575 return; 576 return;
576 577
577 spin_lock(&rtlpriv->locks.lps_lock); 578 spin_lock_irq(&rtlpriv->locks.lps_lock);
578 rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS); 579 rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
579 spin_unlock(&rtlpriv->locks.lps_lock); 580 spin_unlock_irq(&rtlpriv->locks.lps_lock);
580 581
581 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && 582 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
582 !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { 583 !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
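The rtlwifi hunks switch the lps_lock to interrupt-disabling lock primitives. As a general rule (not specific to this driver), spin_lock_irq() is enough when the caller is known to run with interrupts enabled, while spin_lock_irqsave() preserves the caller's interrupt state when that is not known. A small sketch with a hypothetical lock and counter:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_count;

/* Process context where interrupts are known to be enabled. */
static void demo_update_from_process_context(void)
{
	spin_lock_irq(&demo_lock);
	demo_count++;
	spin_unlock_irq(&demo_lock);
}

/* Called from paths with unknown interrupt state: save and restore flags. */
static void demo_update_from_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_count++;
	spin_unlock_irqrestore(&demo_lock, flags);
}

In the hunks above, rtl_lps_leave() takes the irqsave form while the other call sites use the plain _irq variants.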
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 592a10ac5929..3b585aadabfc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
569 } 569 }
570 case ERFSLEEP:{ 570 case ERFSLEEP:{
571 if (ppsc->rfpwr_state == ERFOFF) 571 if (ppsc->rfpwr_state == ERFOFF)
572 break; 572 return false;
573 for (queue_id = 0, i = 0; 573 for (queue_id = 0, i = 0;
574 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { 574 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
575 ring = &pcipriv->dev.tx_ring[queue_id]; 575 ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 72852900df84..e49cf2244c75 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
548 break; 548 break;
549 case ERFSLEEP: 549 case ERFSLEEP:
550 if (ppsc->rfpwr_state == ERFOFF) 550 if (ppsc->rfpwr_state == ERFOFF)
551 break; 551 return false;
552 for (queue_id = 0, i = 0; 552 for (queue_id = 0, i = 0;
553 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { 553 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
554 ring = &pcipriv->dev.tx_ring[queue_id]; 554 ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 3ac7af1c5509..0883349e1c83 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
3374 break; 3374 break;
3375 case ERFSLEEP: 3375 case ERFSLEEP:
3376 if (ppsc->rfpwr_state == ERFOFF) 3376 if (ppsc->rfpwr_state == ERFOFF)
3377 break; 3377 return false;
3378 3378
3379 for (queue_id = 0, i = 0; 3379 for (queue_id = 0, i = 0;
3380 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { 3380 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index f27171af979c..f10ac1ad9087 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
602 } 602 }
603 case ERFSLEEP: 603 case ERFSLEEP:
604 if (ppsc->rfpwr_state == ERFOFF) 604 if (ppsc->rfpwr_state == ERFOFF)
605 break; 605 return false;
606 606
607 for (queue_id = 0, i = 0; 607 for (queue_id = 0, i = 0;
608 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { 608 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index 128ccb79318c..fc29c671cf3b 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -559,7 +559,7 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
559 break; 559 break;
560 } 560 }
561 /* Fail if SSID isn't present in the filters */ 561 /* Fail if SSID isn't present in the filters */
562 if (j == req->n_ssids) { 562 if (j == cmd->n_ssids) {
563 ret = -EINVAL; 563 ret = -EINVAL;
564 goto out_free; 564 goto out_free;
565 } 565 }
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0cb594c86090..15e332d08c8d 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1021,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1021 pending_idx = *((u16 *)skb->data); 1021 pending_idx = *((u16 *)skb->data);
1022 xen_netbk_idx_release(netbk, pending_idx); 1022 xen_netbk_idx_release(netbk, pending_idx);
1023 for (j = start; j < i; j++) { 1023 for (j = start; j < i; j++) {
1024 pending_idx = frag_get_pending_idx(&shinfo->frags[i]); 1024 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1025 xen_netbk_idx_release(netbk, pending_idx); 1025 xen_netbk_idx_release(netbk, pending_idx);
1026 } 1026 }
1027 1027
@@ -1668,7 +1668,7 @@ static int __init netback_init(void)
1668 "netback/%u", group); 1668 "netback/%u", group);
1669 1669
1670 if (IS_ERR(netbk->task)) { 1670 if (IS_ERR(netbk->task)) {
1671 printk(KERN_ALERT "kthread_run() fails at netback\n"); 1671 printk(KERN_ALERT "kthread_create() fails at netback\n");
1672 del_timer(&netbk->net_timer); 1672 del_timer(&netbk->net_timer);
1673 rc = PTR_ERR(netbk->task); 1673 rc = PTR_ERR(netbk->task);
1674 goto failed_init; 1674 goto failed_init;
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6d3dd3988d0f..0f0cfa3bca30 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -26,11 +26,6 @@
26#include <linux/string.h> 26#include <linux/string.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
30#ifndef NO_IRQ
31#define NO_IRQ 0
32#endif
33
34/** 29/**
35 * irq_of_parse_and_map - Parse and map an interrupt into linux virq space 30 * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
36 * @device: Device node of the device whose interrupt is to be mapped 31 * @device: Device node of the device whose interrupt is to be mapped
@@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
44 struct of_irq oirq; 39 struct of_irq oirq;
45 40
46 if (of_irq_map_one(dev, index, &oirq)) 41 if (of_irq_map_one(dev, index, &oirq))
47 return NO_IRQ; 42 return 0;
48 43
49 return irq_create_of_mapping(oirq.controller, oirq.specifier, 44 return irq_create_of_mapping(oirq.controller, oirq.specifier,
50 oirq.size); 45 oirq.size);
@@ -60,27 +55,27 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
60 */ 55 */
61struct device_node *of_irq_find_parent(struct device_node *child) 56struct device_node *of_irq_find_parent(struct device_node *child)
62{ 57{
63 struct device_node *p, *c = child; 58 struct device_node *p;
64 const __be32 *parp; 59 const __be32 *parp;
65 60
66 if (!of_node_get(c)) 61 if (!of_node_get(child))
67 return NULL; 62 return NULL;
68 63
69 do { 64 do {
70 parp = of_get_property(c, "interrupt-parent", NULL); 65 parp = of_get_property(child, "interrupt-parent", NULL);
71 if (parp == NULL) 66 if (parp == NULL)
72 p = of_get_parent(c); 67 p = of_get_parent(child);
73 else { 68 else {
74 if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) 69 if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
75 p = of_node_get(of_irq_dflt_pic); 70 p = of_node_get(of_irq_dflt_pic);
76 else 71 else
77 p = of_find_node_by_phandle(be32_to_cpup(parp)); 72 p = of_find_node_by_phandle(be32_to_cpup(parp));
78 } 73 }
79 of_node_put(c); 74 of_node_put(child);
80 c = p; 75 child = p;
81 } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL); 76 } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL);
82 77
83 return (p == child) ? NULL : p; 78 return p;
84} 79}
85 80
86/** 81/**
@@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
345 340
346 /* Only dereference the resource if both the 341 /* Only dereference the resource if both the
347 * resource and the irq are valid. */ 342 * resource and the irq are valid. */
348 if (r && irq != NO_IRQ) { 343 if (r && irq) {
349 r->start = r->end = irq; 344 r->start = r->end = irq;
350 r->flags = IORESOURCE_IRQ; 345 r->flags = IORESOURCE_IRQ;
351 r->name = dev->full_name; 346 r->name = dev->full_name;
@@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev)
363{ 358{
364 int nr = 0; 359 int nr = 0;
365 360
366 while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ) 361 while (of_irq_to_resource(dev, nr, NULL))
367 nr++; 362 nr++;
368 363
369 return nr; 364 return nr;
@@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
383 int i; 378 int i;
384 379
385 for (i = 0; i < nr_irqs; i++, res++) 380 for (i = 0; i < nr_irqs; i++, res++)
386 if (of_irq_to_resource(dev, i, res) == NO_IRQ) 381 if (!of_irq_to_resource(dev, i, res))
387 break; 382 break;
388 383
389 return i; 384 return i;
@@ -424,6 +419,8 @@ void __init of_irq_init(const struct of_device_id *matches)
424 419
425 desc->dev = np; 420 desc->dev = np;
426 desc->interrupt_parent = of_irq_find_parent(np); 421 desc->interrupt_parent = of_irq_find_parent(np);
422 if (desc->interrupt_parent == np)
423 desc->interrupt_parent = NULL;
427 list_add_tail(&desc->list, &intc_desc_list); 424 list_add_tail(&desc->list, &intc_desc_list);
428 } 425 }
429 426
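With the private NO_IRQ fallback gone, irq_of_parse_and_map() uniformly returns 0 when no interrupt can be mapped, so callers simply test for a non-zero value instead of comparing against an arch-specific constant. A tiny hypothetical probe-time helper showing the convention (the names here are illustrative):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static int demo_get_irq(struct device_node *np, unsigned int *irq_out)
{
	unsigned int irq = irq_of_parse_and_map(np, 0);

	if (!irq)	/* 0 now means "no interrupt" on every architecture */
		return -ENODEV;

	*irq_out = irq;
	return 0;
}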
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index dccd8636095c..f8c752e408a6 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
239 return err; 239 return err;
240} 240}
241 241
242static int timer_mode;
243
242static int __init oprofile_init(void) 244static int __init oprofile_init(void)
243{ 245{
244 int err; 246 int err;
245 247
248	/* always init architecture to set up backtrace support */
246 err = oprofile_arch_init(&oprofile_ops); 249 err = oprofile_arch_init(&oprofile_ops);
247 if (err < 0 || timer) { 250
248 printk(KERN_INFO "oprofile: using timer interrupt.\n"); 251 timer_mode = err || timer; /* fall back to timer mode on errors */
252 if (timer_mode) {
253 if (!err)
254 oprofile_arch_exit();
249 err = oprofile_timer_init(&oprofile_ops); 255 err = oprofile_timer_init(&oprofile_ops);
250 if (err) 256 if (err)
251 return err; 257 return err;
252 } 258 }
253 return oprofilefs_register(); 259
260 err = oprofilefs_register();
261 if (!err)
262 return 0;
263
264 /* failed */
265 if (timer_mode)
266 oprofile_timer_exit();
267 else
268 oprofile_arch_exit();
269
270 return err;
254} 271}
255 272
256 273
257static void __exit oprofile_exit(void) 274static void __exit oprofile_exit(void)
258{ 275{
259 oprofile_timer_exit();
260 oprofilefs_unregister(); 276 oprofilefs_unregister();
261 oprofile_arch_exit(); 277 if (timer_mode)
278 oprofile_timer_exit();
279 else
280 oprofile_arch_exit();
262} 281}
263 282
264 283
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 3ef44624f510..878fba126582 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops)
110 ops->start = oprofile_hrtimer_start; 110 ops->start = oprofile_hrtimer_start;
111 ops->stop = oprofile_hrtimer_stop; 111 ops->stop = oprofile_hrtimer_stop;
112 ops->cpu_type = "timer"; 112 ops->cpu_type = "timer";
113 printk(KERN_INFO "oprofile: using timer interrupt.\n");
113 return 0; 114 return 0;
114} 115}
115 116
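The oprofile hunks record which profiling backend was actually initialized so that error unwinding and module exit tear down the matching one. A generic sketch of that init/exit symmetry, with made-up backend_* and demo_* helpers standing in for the arch, timer, and filesystem code:

#include <linux/init.h>
#include <linux/types.h>

int backend_primary_init(void);		/* hypothetical helpers, not a real API */
void backend_primary_exit(void);
int backend_fallback_init(void);
void backend_fallback_exit(void);
int demo_register(void);
void demo_unregister(void);

static bool use_fallback;		/* which backend did we bring up? */

static int __init demo_init(void)
{
	int err = backend_primary_init();

	use_fallback = err != 0;	/* fall back to the simpler backend on error */
	if (use_fallback) {
		err = backend_fallback_init();
		if (err)
			return err;
	}

	err = demo_register();
	if (!err)
		return 0;

	/* Registration failed: undo whichever backend was set up. */
	if (use_fallback)
		backend_fallback_exit();
	else
		backend_primary_exit();
	return err;
}

static void __exit demo_exit(void)
{
	demo_unregister();
	if (use_fallback)
		backend_fallback_exit();
	else
		backend_primary_exit();
}

The real patch has one extra wrinkle visible above: if the architecture code initialized successfully but timer mode was forced, oprofile_arch_exit() is called before the timer backend is brought up, so only one backend is ever live.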
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index b6f9749b4fa7..f02b5235056d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -76,6 +76,7 @@ config PCI_IOV
76 76
77config PCI_PRI 77config PCI_PRI
78 bool "PCI PRI support" 78 bool "PCI PRI support"
79 depends on PCI
79 select PCI_ATS 80 select PCI_ATS
80 help 81 help
81 PRI is the PCI Page Request Interface. It allows PCI devices that are 82 PRI is the PCI Page Request Interface. It allows PCI devices that are
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 596172b4ae95..fce1c54a0c8d 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -459,8 +459,17 @@ static int add_bridge(acpi_handle handle)
459{ 459{
460 acpi_status status; 460 acpi_status status;
461 unsigned long long tmp; 461 unsigned long long tmp;
462 struct acpi_pci_root *root;
462 acpi_handle dummy_handle; 463 acpi_handle dummy_handle;
463 464
465 /*
466 * We shouldn't use this bridge if PCIe native hotplug control has been
467 * granted by the BIOS for it.
468 */
469 root = acpi_pci_find_root(handle);
470 if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
471 return -ENODEV;
472
464 /* if the bridge doesn't have _STA, we assume it is always there */ 473 /* if the bridge doesn't have _STA, we assume it is always there */
465 status = acpi_get_handle(handle, "_STA", &dummy_handle); 474 status = acpi_get_handle(handle, "_STA", &dummy_handle);
466 if (ACPI_SUCCESS(status)) { 475 if (ACPI_SUCCESS(status)) {
@@ -1376,13 +1385,23 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
1376static acpi_status 1385static acpi_status
1377find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) 1386find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
1378{ 1387{
1388 struct acpi_pci_root *root;
1379 int *count = (int *)context; 1389 int *count = (int *)context;
1380 1390
1381 if (acpi_is_root_bridge(handle)) { 1391 if (!acpi_is_root_bridge(handle))
1382 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, 1392 return AE_OK;
1383 handle_hotplug_event_bridge, NULL); 1393
1384 (*count)++; 1394 root = acpi_pci_find_root(handle);
1385 } 1395 if (!root)
1396 return AE_OK;
1397
1398 if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
1399 return AE_OK;
1400
1401 (*count)++;
1402 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
1403 handle_hotplug_event_bridge, NULL);
1404
1386 return AE_OK ; 1405 return AE_OK ;
1387} 1406}
1388 1407
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 1e9c9aacc3a6..085dbb5fc168 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -213,9 +213,6 @@ static int board_added(struct slot *p_slot)
213 goto err_exit; 213 goto err_exit;
214 } 214 }
215 215
216 /* Wait for 1 second after checking link training status */
217 msleep(1000);
218
219 /* Check for a power fault */ 216 /* Check for a power fault */
220 if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) { 217 if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
221 ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot)); 218 ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 96dc4734e4af..7b1414810ae3 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -280,6 +280,14 @@ int pciehp_check_link_status(struct controller *ctrl)
280 else 280 else
281 msleep(1000); 281 msleep(1000);
282 282
283 /*
284 * Need to wait for 1000 ms after Data Link Layer Link Active
285 * (DLLLA) bit reads 1b before sending configuration request.
 286 * We need it before checking the Link Training (LT) bit because
 287 * LT is still set even after the DLLLA bit is set on some platforms.
288 */
289 msleep(1000);
290
283 retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); 291 retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
284 if (retval) { 292 if (retval) {
285 ctrl_err(ctrl, "Cannot read LNKSTATUS register\n"); 293 ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
@@ -294,6 +302,16 @@ int pciehp_check_link_status(struct controller *ctrl)
294 return retval; 302 return retval;
295 } 303 }
296 304
305 /*
306 * If the port supports Link speeds greater than 5.0 GT/s, we
307 * must wait for 100 ms after Link training completes before
308 * sending configuration request.
309 */
310 if (ctrl->pcie->port->subordinate->max_bus_speed > PCIE_SPEED_5_0GT)
311 msleep(100);
312
313 pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
314
297 return retval; 315 return retval;
298} 316}
299 317
@@ -484,7 +502,6 @@ int pciehp_power_on_slot(struct slot * slot)
484 u16 slot_cmd; 502 u16 slot_cmd;
485 u16 cmd_mask; 503 u16 cmd_mask;
486 u16 slot_status; 504 u16 slot_status;
487 u16 lnk_status;
488 int retval = 0; 505 int retval = 0;
489 506
490 /* Clear sticky power-fault bit from previous power failures */ 507 /* Clear sticky power-fault bit from previous power failures */
@@ -516,14 +533,6 @@ int pciehp_power_on_slot(struct slot * slot)
516 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, 533 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
517 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); 534 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
518 535
519 retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
520 if (retval) {
521 ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n",
522 __func__);
523 return retval;
524 }
525 pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
526
527 return retval; 536 return retval;
528} 537}
529 538
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index aca972bbfb4c..dd7e0c51a33e 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -278,8 +278,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
278 278
279static int is_shpc_capable(struct pci_dev *dev) 279static int is_shpc_capable(struct pci_dev *dev)
280{ 280{
281 if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == 281 if (dev->vendor == PCI_VENDOR_ID_AMD &&
282 PCI_DEVICE_ID_AMD_GOLAM_7450)) 282 dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
283 return 1; 283 return 1;
284 if (!pci_find_capability(dev, PCI_CAP_ID_SHPC)) 284 if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
285 return 0; 285 return 0;
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 36547f0ce305..75ba2311b54f 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -944,8 +944,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
944 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */ 944 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
945 ctrl_dbg(ctrl, "Hotplug Controller:\n"); 945 ctrl_dbg(ctrl, "Hotplug Controller:\n");
946 946
947 if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device == 947 if (pdev->vendor == PCI_VENDOR_ID_AMD &&
948 PCI_DEVICE_ID_AMD_GOLAM_7450)) { 948 pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) {
949 /* amd shpc driver doesn't use Base Offset; assume 0 */ 949 /* amd shpc driver doesn't use Base Offset; assume 0 */
950 ctrl->mmio_base = pci_resource_start(pdev, 0); 950 ctrl->mmio_base = pci_resource_start(pdev, 0);
951 ctrl->mmio_size = pci_resource_len(pdev, 0); 951 ctrl->mmio_size = pci_resource_len(pdev, 0);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index ef566443f945..e17e2f8001d2 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -2,23 +2,17 @@
2# PINCTRL infrastructure and drivers 2# PINCTRL infrastructure and drivers
3# 3#
4 4
5menuconfig PINCTRL 5config PINCTRL
6 bool "PINCTRL Support" 6 bool
7 depends on EXPERIMENTAL 7 depends on EXPERIMENTAL
8 help
9 This enables the PINCTRL subsystem for controlling pins
10 on chip packages, for example multiplexing pins on primarily
11 PGA and BGA packages for systems on chip.
12
13 If unsure, say N.
14 8
15if PINCTRL 9if PINCTRL
16 10
11menu "Pin controllers"
12 depends on PINCTRL
13
17config PINMUX 14config PINMUX
18 bool "Support pinmux controllers" 15 bool "Support pinmux controllers"
19 help
20 Say Y here if you want the pincontrol subsystem to handle pin
21 multiplexing drivers.
22 16
23config DEBUG_PINCTRL 17config DEBUG_PINCTRL
24 bool "Debug PINCTRL calls" 18 bool "Debug PINCTRL calls"
@@ -30,14 +24,12 @@ config PINMUX_SIRF
30 bool "CSR SiRFprimaII pinmux driver" 24 bool "CSR SiRFprimaII pinmux driver"
31 depends on ARCH_PRIMA2 25 depends on ARCH_PRIMA2
32 select PINMUX 26 select PINMUX
33 help
34 Say Y here to enable the SiRFprimaII pinmux driver
35 27
36config PINMUX_U300 28config PINMUX_U300
37 bool "U300 pinmux driver" 29 bool "U300 pinmux driver"
38 depends on ARCH_U300 30 depends on ARCH_U300
39 select PINMUX 31 select PINMUX
40 help 32
41 Say Y here to enable the U300 pinmux driver 33endmenu
42 34
43endif 35endif
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index f4e3d82379d7..7f43cf86d776 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -83,8 +83,10 @@ config DELL_LAPTOP
83 depends on EXPERIMENTAL 83 depends on EXPERIMENTAL
84 depends on BACKLIGHT_CLASS_DEVICE 84 depends on BACKLIGHT_CLASS_DEVICE
85 depends on RFKILL || RFKILL = n 85 depends on RFKILL || RFKILL = n
86 depends on POWER_SUPPLY
87 depends on SERIO_I8042 86 depends on SERIO_I8042
87 select POWER_SUPPLY
88 select LEDS_CLASS
89 select NEW_LEDS
88 default n 90 default n
89 ---help--- 91 ---help---
90 This driver adds support for rfkill and backlight control to Dell 92 This driver adds support for rfkill and backlight control to Dell
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index a43cfd906c6d..d93e962f2610 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -589,14 +589,14 @@ static const struct backlight_ops dell_ops = {
589 .update_status = dell_send_intensity, 589 .update_status = dell_send_intensity,
590}; 590};
591 591
592static void touchpad_led_on() 592static void touchpad_led_on(void)
593{ 593{
594 int command = 0x97; 594 int command = 0x97;
595 char data = 1; 595 char data = 1;
596 i8042_command(&data, command | 1 << 12); 596 i8042_command(&data, command | 1 << 12);
597} 597}
598 598
599static void touchpad_led_off() 599static void touchpad_led_off(void)
600{ 600{
601 int command = 0x97; 601 int command = 0x97;
602 char data = 2; 602 char data = 2;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 13ef8c37471d..dcdc1f4a4624 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -121,6 +121,7 @@ struct toshiba_acpi_dev {
121 int illumination_supported:1; 121 int illumination_supported:1;
122 int video_supported:1; 122 int video_supported:1;
123 int fan_supported:1; 123 int fan_supported:1;
124 int system_event_supported:1;
124 125
125 struct mutex mutex; 126 struct mutex mutex;
126}; 127};
@@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
724 u32 hci_result; 725 u32 hci_result;
725 u32 value; 726 u32 value;
726 727
727 if (!dev->key_event_valid) { 728 if (!dev->key_event_valid && dev->system_event_supported) {
728 hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result); 729 hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
729 if (hci_result == HCI_SUCCESS) { 730 if (hci_result == HCI_SUCCESS) {
730 dev->key_event_valid = 1; 731 dev->key_event_valid = 1;
@@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
964 965
965 /* enable event fifo */ 966 /* enable event fifo */
966 hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result); 967 hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
968 if (hci_result == HCI_SUCCESS)
969 dev->system_event_supported = 1;
967 970
968 props.type = BACKLIGHT_PLATFORM; 971 props.type = BACKLIGHT_PLATFORM;
969 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; 972 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
1032{ 1035{
1033 struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev); 1036 struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
1034 u32 hci_result, value; 1037 u32 hci_result, value;
1038 int retries = 3;
1035 1039
1036 if (event != 0x80) 1040 if (!dev->system_event_supported || event != 0x80)
1037 return; 1041 return;
1042
1038 do { 1043 do {
1039 hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result); 1044 hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
1040 if (hci_result == HCI_SUCCESS) { 1045 switch (hci_result) {
1046 case HCI_SUCCESS:
1041 if (value == 0x100) 1047 if (value == 0x100)
1042 continue; 1048 continue;
1043 /* act on key press; ignore key release */ 1049 /* act on key press; ignore key release */
@@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
1049 pr_info("Unknown key %x\n", 1055 pr_info("Unknown key %x\n",
1050 value); 1056 value);
1051 } 1057 }
1052 } else if (hci_result == HCI_NOT_SUPPORTED) { 1058 break;
1059 case HCI_NOT_SUPPORTED:
1053 /* This is a workaround for an unresolved issue on 1060 /* This is a workaround for an unresolved issue on
1054 * some machines where system events sporadically 1061 * some machines where system events sporadically
1055 * become disabled. */ 1062 * become disabled. */
1056 hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result); 1063 hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
1057 pr_notice("Re-enabled hotkeys\n"); 1064 pr_notice("Re-enabled hotkeys\n");
1065 /* fall through */
1066 default:
1067 retries--;
1068 break;
1058 } 1069 }
1059 } while (hci_result != HCI_EMPTY); 1070 } while (retries && hci_result != HCI_EMPTY);
1060} 1071}
1061 1072
1062 1073
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index cffcb7c00b00..01fa671ec97f 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
61#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5) 61#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5)
62#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6) 62#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6)
63#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7) 63#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7)
64#define PMIC_BATT_CHR_EXCPT_MASK 0xC6 64#define PMIC_BATT_CHR_EXCPT_MASK 0x86
65
65#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31) 66#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31)
66#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF 67#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF
67 68
@@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
304 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; 305 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
305 pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT); 306 pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
306 batt_exception = 1; 307 batt_exception = 1;
307 } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
308 pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
309 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
310 pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
311 batt_exception = 1;
312 } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) { 308 } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
313 pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT; 309 pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
314 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; 310 pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
316 batt_exception = 1; 312 batt_exception = 1;
317 } else { 313 } else {
318 pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD; 314 pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
315 if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
316 /* PMIC will change charging current automatically */
317 pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
318 }
319 } 319 }
320 } 320 }
321 321
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index d9fb729535a1..fb7300837fee 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -952,7 +952,7 @@ static int ps3_vuart_bus_interrupt_get(void)
952 } 952 }
953 953
954 result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler, 954 result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler,
955 IRQF_DISABLED, "vuart", &vuart_bus_priv); 955 0, "vuart", &vuart_bus_priv);
956 956
957 if (result) { 957 if (result) {
958 pr_debug("%s:%d: request_irq failed (%d)\n", 958 pr_debug("%s:%d: request_irq failed (%d)\n",
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
index cc328dec946b..8c3f5adf1bc6 100644
--- a/drivers/ps3/ps3stor_lib.c
+++ b/drivers/ps3/ps3stor_lib.c
@@ -167,7 +167,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
167 goto fail_close_device; 167 goto fail_close_device;
168 } 168 }
169 169
170 error = request_irq(dev->irq, handler, IRQF_DISABLED, 170 error = request_irq(dev->irq, handler, 0,
171 dev->sbd.core.driver->name, dev); 171 dev->sbd.core.driver->name, dev);
172 if (error) { 172 if (error) {
173 dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n", 173 dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n",
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index cf3f9997546d..10451a15e828 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
101 101
102static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp) 102static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
103{ 103{
104 return 1; /* always round timer functions to one nanosecond */ 104 tp->tv_sec = 0;
105 tp->tv_nsec = 1;
106 return 0;
105} 107}
106 108
107static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp) 109static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 5225930a10cd..691b1ab1a3d0 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
851 INIT_WORK(&priv->idb_work, tsi721_db_dpc); 851 INIT_WORK(&priv->idb_work, tsi721_db_dpc);
852 852
853 /* Allocate buffer for inbound doorbells queue */ 853 /* Allocate buffer for inbound doorbells queue */
854 priv->idb_base = dma_alloc_coherent(&priv->pdev->dev, 854 priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
855 IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, 855 IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
856 &priv->idb_dma, GFP_KERNEL); 856 &priv->idb_dma, GFP_KERNEL);
857 if (!priv->idb_base) 857 if (!priv->idb_base)
858 return -ENOMEM; 858 return -ENOMEM;
859 859
860 memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
861
862 dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n", 860 dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
863 priv->idb_base, (unsigned long long)priv->idb_dma); 861 priv->idb_base, (unsigned long long)priv->idb_dma);
864 862
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
904 */ 902 */
905 903
906 /* Allocate space for DMA descriptors */ 904 /* Allocate space for DMA descriptors */
907 bd_ptr = dma_alloc_coherent(&priv->pdev->dev, 905 bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
908 bd_num * sizeof(struct tsi721_dma_desc), 906 bd_num * sizeof(struct tsi721_dma_desc),
909 &bd_phys, GFP_KERNEL); 907 &bd_phys, GFP_KERNEL);
910 if (!bd_ptr) 908 if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
913 priv->bdma[chnum].bd_phys = bd_phys; 911 priv->bdma[chnum].bd_phys = bd_phys;
914 priv->bdma[chnum].bd_base = bd_ptr; 912 priv->bdma[chnum].bd_base = bd_ptr;
915 913
916 memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
917
918 dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n", 914 dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
919 bd_ptr, (unsigned long long)bd_phys); 915 bd_ptr, (unsigned long long)bd_phys);
920 916
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
922 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? 918 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
923 bd_num : TSI721_DMA_MINSTSSZ; 919 bd_num : TSI721_DMA_MINSTSSZ;
924 sts_size = roundup_pow_of_two(sts_size); 920 sts_size = roundup_pow_of_two(sts_size);
925 sts_ptr = dma_alloc_coherent(&priv->pdev->dev, 921 sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
926 sts_size * sizeof(struct tsi721_dma_sts), 922 sts_size * sizeof(struct tsi721_dma_sts),
927 &sts_phys, GFP_KERNEL); 923 &sts_phys, GFP_KERNEL);
928 if (!sts_ptr) { 924 if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
938 priv->bdma[chnum].sts_base = sts_ptr; 934 priv->bdma[chnum].sts_base = sts_ptr;
939 priv->bdma[chnum].sts_size = sts_size; 935 priv->bdma[chnum].sts_size = sts_size;
940 936
941 memset(sts_ptr, 0, sts_size);
942
943 dev_dbg(&priv->pdev->dev, 937 dev_dbg(&priv->pdev->dev,
944 "desc status FIFO @ %p (phys = %llx) size=0x%x\n", 938 "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
945 sts_ptr, (unsigned long long)sts_phys, sts_size); 939 sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
1400 1394
1401 /* Outbound message descriptor status FIFO allocation */ 1395 /* Outbound message descriptor status FIFO allocation */
1402 priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); 1396 priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
1403 priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev, 1397 priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
1404 priv->omsg_ring[mbox].sts_size * 1398 priv->omsg_ring[mbox].sts_size *
1405 sizeof(struct tsi721_dma_sts), 1399 sizeof(struct tsi721_dma_sts),
1406 &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); 1400 &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
1412 goto out_desc; 1406 goto out_desc;
1413 } 1407 }
1414 1408
1415 memset(priv->omsg_ring[mbox].sts_base, 0,
1416 entries * sizeof(struct tsi721_dma_sts));
1417
1418 /* 1409 /*
1419 * Configure Outbound Messaging Engine 1410 * Configure Outbound Messaging Engine
1420 */ 1411 */
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
2116 INIT_LIST_HEAD(&mport->dbells); 2107 INIT_LIST_HEAD(&mport->dbells);
2117 2108
2118 rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); 2109 rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
2119 rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0); 2110 rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
2120 rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); 2111 rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
2121 strcpy(mport->name, "Tsi721 mport"); 2112 strcpy(mport->name, "Tsi721 mport");
2122 2113
2123 /* Hook up interrupt handler */ 2114 /* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2163 const struct pci_device_id *id) 2154 const struct pci_device_id *id)
2164{ 2155{
2165 struct tsi721_device *priv; 2156 struct tsi721_device *priv;
2166 int i; 2157 int i, cap;
2167 int err; 2158 int err;
2168 u32 regval; 2159 u32 regval;
2169 2160
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2271 dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); 2262 dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
2272 } 2263 }
2273 2264
2274 /* Clear "no snoop" and "relaxed ordering" bits. */ 2265 cap = pci_pcie_cap(pdev);
2275 pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval); 2266 BUG_ON(cap == 0);
2276 regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN); 2267
2277 pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval); 2268 /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
2269 pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
2270 regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
2271 PCI_EXP_DEVCTL_NOSNOOP_EN);
2272 regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
2273 pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
2274
2275 /* Adjust PCIe completion timeout. */
2276 pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
2277 regval &= ~(0x0f);
2278 pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
2278 2279
2279 /* 2280 /*
2280 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block 2281 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 58be4deb1402..822e54c394d5 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -72,6 +72,8 @@
72#define TSI721_MSIXPBA_OFFSET 0x2a000 72#define TSI721_MSIXPBA_OFFSET 0x2a000
73#define TSI721_PCIECFG_EPCTL 0x400 73#define TSI721_PCIECFG_EPCTL 0x400
74 74
75#define MAX_READ_REQUEST_SZ_SHIFT 12
76
75/* 77/*
76 * Event Management Registers 78 * Event Management Registers
77 */ 79 */
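Several of the tsi721 allocations above move from dma_alloc_coherent() followed by an explicit memset() to dma_zalloc_coherent(), which hands back already-zeroed coherent memory; as a side effect this also retires a memset that cleared only sts_size bytes rather than the full sts_size * sizeof(struct tsi721_dma_sts) buffer. A minimal sketch of the two equivalent forms, assuming a generic device and ring size:

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Old style: allocate, then clear by hand. */
static void *demo_alloc_ring(struct device *dev, size_t size, dma_addr_t *dma)
{
	void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	if (buf)
		memset(buf, 0, size);
	return buf;
}

/* New style: one call, the memory comes back zeroed. */
static void *demo_zalloc_ring(struct device *dev, size_t size, dma_addr_t *dma)
{
	return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
}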
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 5abeb3ac3e8d..298c6c6a2795 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -160,7 +160,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id)
160 break; 160 break;
161 } 161 }
162 162
163 if (!ri) 163 if (i == ARRAY_SIZE(aat2870_regulators))
164 return NULL; 164 return NULL;
165 165
166 ri->enable_addr = AAT2870_LDO_EN; 166 ri->enable_addr = AAT2870_LDO_EN;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 669d02160221..938398f3e869 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2799,8 +2799,8 @@ void regulator_unregister(struct regulator_dev *rdev)
2799 list_del(&rdev->list); 2799 list_del(&rdev->list);
2800 if (rdev->supply) 2800 if (rdev->supply)
2801 regulator_put(rdev->supply); 2801 regulator_put(rdev->supply);
2802 device_unregister(&rdev->dev);
2803 kfree(rdev->constraints); 2802 kfree(rdev->constraints);
2803 device_unregister(&rdev->dev);
2804 mutex_unlock(&regulator_list_mutex); 2804 mutex_unlock(&regulator_list_mutex);
2805} 2805}
2806EXPORT_SYMBOL_GPL(regulator_unregister); 2806EXPORT_SYMBOL_GPL(regulator_unregister);
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 66d2d60b436a..b552aae55b41 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -664,10 +664,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
664 664
665 switch (id) { 665 switch (id) {
666 case TPS65910_REG_VDD1: 666 case TPS65910_REG_VDD1:
667 dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1; 667 dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
668 if (dcdc_mult == 1) 668 if (dcdc_mult == 1)
669 dcdc_mult--; 669 dcdc_mult--;
670 vsel = (selector % VDD1_2_NUM_VOLTS) + 3; 670 vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
671 671
672 tps65910_modify_bits(pmic, TPS65910_VDD1, 672 tps65910_modify_bits(pmic, TPS65910_VDD1,
673 (dcdc_mult << VDD1_VGAIN_SEL_SHIFT), 673 (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
@@ -675,10 +675,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
675 tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel); 675 tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
676 break; 676 break;
677 case TPS65910_REG_VDD2: 677 case TPS65910_REG_VDD2:
678 dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1; 678 dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
679 if (dcdc_mult == 1) 679 if (dcdc_mult == 1)
680 dcdc_mult--; 680 dcdc_mult--;
681 vsel = (selector % VDD1_2_NUM_VOLTS) + 3; 681 vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
682 682
683 tps65910_modify_bits(pmic, TPS65910_VDD2, 683 tps65910_modify_bits(pmic, TPS65910_VDD2,
684 (dcdc_mult << VDD2_VGAIN_SEL_SHIFT), 684 (dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
@@ -756,9 +756,9 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
756 switch (id) { 756 switch (id) {
757 case TPS65910_REG_VDD1: 757 case TPS65910_REG_VDD1:
758 case TPS65910_REG_VDD2: 758 case TPS65910_REG_VDD2:
759 mult = (selector / VDD1_2_NUM_VOLTS) + 1; 759 mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
760 volt = VDD1_2_MIN_VOLT + 760 volt = VDD1_2_MIN_VOLT +
761 (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET; 761 (selector % VDD1_2_NUM_VOLT_FINE) * VDD1_2_OFFSET;
762 break; 762 break;
763 case TPS65911_REG_VDDCTRL: 763 case TPS65911_REG_VDDCTRL:
764 volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET); 764 volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
@@ -947,6 +947,8 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
947 947
948 if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) { 948 if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
949 pmic->desc[i].ops = &tps65910_ops_dcdc; 949 pmic->desc[i].ops = &tps65910_ops_dcdc;
950 pmic->desc[i].n_voltages = VDD1_2_NUM_VOLT_FINE *
951 VDD1_2_NUM_VOLT_COARSE;
950 } else if (i == TPS65910_REG_VDD3) { 952 } else if (i == TPS65910_REG_VDD3) {
951 if (tps65910_chip_id(tps65910) == TPS65910) 953 if (tps65910_chip_id(tps65910) == TPS65910)
952 pmic->desc[i].ops = &tps65910_ops_vdd3; 954 pmic->desc[i].ops = &tps65910_ops_vdd3;
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index ee8747f4fa08..11cc308d66e9 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -71,6 +71,7 @@ struct twlreg_info {
71#define VREG_TYPE 1 71#define VREG_TYPE 1
72#define VREG_REMAP 2 72#define VREG_REMAP 2
73#define VREG_DEDICATED 3 /* LDO control */ 73#define VREG_DEDICATED 3 /* LDO control */
74#define VREG_VOLTAGE_SMPS_4030 9
74/* TWL6030 register offsets */ 75/* TWL6030 register offsets */
75#define VREG_TRANS 1 76#define VREG_TRANS 1
76#define VREG_STATE 2 77#define VREG_STATE 2
@@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = {
514 .get_status = twl4030reg_get_status, 515 .get_status = twl4030reg_get_status,
515}; 516};
516 517
518static int
519twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
520 unsigned *selector)
521{
522 struct twlreg_info *info = rdev_get_drvdata(rdev);
523 int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
524
525 twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
526 vsel);
527 return 0;
528}
529
530static int twl4030smps_get_voltage(struct regulator_dev *rdev)
531{
532 struct twlreg_info *info = rdev_get_drvdata(rdev);
533 int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
534 VREG_VOLTAGE_SMPS_4030);
535
536 return vsel * 12500 + 600000;
537}
538
539static struct regulator_ops twl4030smps_ops = {
540 .set_voltage = twl4030smps_set_voltage,
541 .get_voltage = twl4030smps_get_voltage,
542};
543
517static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) 544static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
518{ 545{
519 struct twlreg_info *info = rdev_get_drvdata(rdev); 546 struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = {
856 }, \ 883 }, \
857 } 884 }
858 885
886#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
887 { \
888 .base = offset, \
889 .id = num, \
890 .delay = turnon_delay, \
891 .remap = remap_conf, \
892 .desc = { \
893 .name = #label, \
894 .id = TWL4030_REG_##label, \
895 .ops = &twl4030smps_ops, \
896 .type = REGULATOR_VOLTAGE, \
897 .owner = THIS_MODULE, \
898 }, \
899 }
900
859#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \ 901#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
860 .base = offset, \ 902 .base = offset, \
861 .min_mV = min_mVolts, \ 903 .min_mV = min_mVolts, \
@@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = {
947 TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08), 989 TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
948 TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08), 990 TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
949 TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08), 991 TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
950 TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08), 992 TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08),
951 TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08), 993 TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08),
952 TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08), 994 TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
953 TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08), 995 TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
954 TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08), 996 TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e8326f26fa2f..dc4c2748bbc3 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
63 */ 63 */
64 delta = timespec_sub(old_system, old_rtc); 64 delta = timespec_sub(old_system, old_rtc);
65 delta_delta = timespec_sub(delta, old_delta); 65 delta_delta = timespec_sub(delta, old_delta);
66 if (abs(delta_delta.tv_sec) >= 2) { 66 if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
67 /* 67 /*
68 * if delta_delta is too large, assume time correction 68 * if delta_delta is too large, assume time correction
69 * has occurred and set old_delta to the current delta. 69 * has occurred and set old_delta to the current delta.
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev)
97 rtc_tm_to_time(&tm, &new_rtc.tv_sec); 97 rtc_tm_to_time(&tm, &new_rtc.tv_sec);
98 new_rtc.tv_nsec = 0; 98 new_rtc.tv_nsec = 0;
99 99
100 if (new_rtc.tv_sec <= old_rtc.tv_sec) { 100 if (new_rtc.tv_sec < old_rtc.tv_sec) {
101 if (new_rtc.tv_sec < old_rtc.tv_sec) 101 pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
102 pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
103 return 0; 102 return 0;
104 } 103 }
105 104
@@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev)
116 sleep_time = timespec_sub(sleep_time, 115 sleep_time = timespec_sub(sleep_time,
117 timespec_sub(new_system, old_system)); 116 timespec_sub(new_system, old_system));
118 117
119 timekeeping_inject_sleeptime(&sleep_time); 118 if (sleep_time.tv_sec >= 0)
119 timekeeping_inject_sleeptime(&sleep_time);
120 return 0; 120 return 0;
121} 121}
122 122
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8e286259a007..fa4d9f324189 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -319,6 +319,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
319} 319}
320EXPORT_SYMBOL_GPL(rtc_read_alarm); 320EXPORT_SYMBOL_GPL(rtc_read_alarm);
321 321
322static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
323{
324 int err;
325
326 if (!rtc->ops)
327 err = -ENODEV;
328 else if (!rtc->ops->set_alarm)
329 err = -EINVAL;
330 else
331 err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
332
333 return err;
334}
335
322static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 336static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
323{ 337{
324 struct rtc_time tm; 338 struct rtc_time tm;
@@ -342,14 +356,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
342 * over right here, before we set the alarm. 356 * over right here, before we set the alarm.
343 */ 357 */
344 358
345 if (!rtc->ops) 359 return ___rtc_set_alarm(rtc, alarm);
346 err = -ENODEV;
347 else if (!rtc->ops->set_alarm)
348 err = -EINVAL;
349 else
350 err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
351
352 return err;
353} 360}
354 361
355int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 362int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -763,6 +770,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
763 return 0; 770 return 0;
764} 771}
765 772
773static void rtc_alarm_disable(struct rtc_device *rtc)
774{
775 struct rtc_wkalrm alarm;
776 struct rtc_time tm;
777
778 __rtc_read_time(rtc, &tm);
779
780 alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
781 ktime_set(300, 0)));
782 alarm.enabled = 0;
783
784 ___rtc_set_alarm(rtc, &alarm);
785}
786
766/** 787/**
767 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue 788 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
768 * @rtc rtc device 789 * @rtc rtc device
@@ -784,8 +805,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
784 struct rtc_wkalrm alarm; 805 struct rtc_wkalrm alarm;
785 int err; 806 int err;
786 next = timerqueue_getnext(&rtc->timerqueue); 807 next = timerqueue_getnext(&rtc->timerqueue);
787 if (!next) 808 if (!next) {
809 rtc_alarm_disable(rtc);
788 return; 810 return;
811 }
789 alarm.time = rtc_ktime_to_tm(next->expires); 812 alarm.time = rtc_ktime_to_tm(next->expires);
790 alarm.enabled = 1; 813 alarm.enabled = 1;
791 err = __rtc_set_alarm(rtc, &alarm); 814 err = __rtc_set_alarm(rtc, &alarm);
@@ -847,7 +870,8 @@ again:
847 err = __rtc_set_alarm(rtc, &alarm); 870 err = __rtc_set_alarm(rtc, &alarm);
848 if (err == -ETIME) 871 if (err == -ETIME)
849 goto again; 872 goto again;
850 } 873 } else
874 rtc_alarm_disable(rtc);
851 875
852 mutex_unlock(&rtc->ops_lock); 876 mutex_unlock(&rtc->ops_lock);
853} 877}
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index d33544802a2e..bb21f443fb70 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -76,12 +76,15 @@ static inline unsigned char vrtc_is_updating(void)
76/* 76/*
77 * rtc_time's year contains the increment over 1900, but vRTC's YEAR 77 * rtc_time's year contains the increment over 1900, but vRTC's YEAR
78 * register can't be programmed to value larger than 0x64, so vRTC 78 * register can't be programmed to value larger than 0x64, so vRTC
79 * driver chose to use 1960 (1970 is UNIX time start point) as the base, 79 * driver chose to use 1972 (1970 is UNIX time start point) as the base,
80 * and does the translation at read/write time. 80 * and does the translation at read/write time.
81 * 81 *
82 * Why not just use 1970 as the offset? it's because using 1960 will 82 * Why not just use 1970 as the offset? it's because using 1972 will
83 * make it consistent in leap year setting for both vrtc and low-level 83 * make it consistent in leap year setting for both vrtc and low-level
84 * physical rtc devices. 84 * physical rtc devices. Then why not use 1960 as the offset? If we use
85 * 1960, for a device's first use, its YEAR register is 0 and the system
86 * year will be parsed as 1960, which is not a valid UNIX time and will
87 * cause many applications to fail mysteriously.
85 */ 88 */
86static int mrst_read_time(struct device *dev, struct rtc_time *time) 89static int mrst_read_time(struct device *dev, struct rtc_time *time)
87{ 90{
@@ -99,10 +102,10 @@ static int mrst_read_time(struct device *dev, struct rtc_time *time)
99 time->tm_year = vrtc_cmos_read(RTC_YEAR); 102 time->tm_year = vrtc_cmos_read(RTC_YEAR);
100 spin_unlock_irqrestore(&rtc_lock, flags); 103 spin_unlock_irqrestore(&rtc_lock, flags);
101 104
102 /* Adjust for the 1960/1900 */ 105 /* Adjust for the 1972/1900 */
103 time->tm_year += 60; 106 time->tm_year += 72;
104 time->tm_mon--; 107 time->tm_mon--;
105 return RTC_24H; 108 return rtc_valid_tm(time);
106} 109}
107 110
108static int mrst_set_time(struct device *dev, struct rtc_time *time) 111static int mrst_set_time(struct device *dev, struct rtc_time *time)
@@ -119,9 +122,9 @@ static int mrst_set_time(struct device *dev, struct rtc_time *time)
119 min = time->tm_min; 122 min = time->tm_min;
120 sec = time->tm_sec; 123 sec = time->tm_sec;
121 124
122 if (yrs < 70 || yrs > 138) 125 if (yrs < 72 || yrs > 138)
123 return -EINVAL; 126 return -EINVAL;
124 yrs -= 60; 127 yrs -= 72;
125 128
126 spin_lock_irqsave(&rtc_lock, flags); 129 spin_lock_irqsave(&rtc_lock, flags);
127 130
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index b3eba3cddd42..e4b6880aabd0 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -220,7 +220,7 @@ static void puv3_rtc_enable(struct platform_device *pdev, int en)
220 } 220 }
221} 221}
222 222
223static int puv3_rtc_remove(struct platform_device *dev) 223static int __devexit puv3_rtc_remove(struct platform_device *dev)
224{ 224{
225 struct rtc_device *rtc = platform_get_drvdata(dev); 225 struct rtc_device *rtc = platform_get_drvdata(dev);
226 226
@@ -236,7 +236,7 @@ static int puv3_rtc_remove(struct platform_device *dev)
236 return 0; 236 return 0;
237} 237}
238 238
239static int puv3_rtc_probe(struct platform_device *pdev) 239static int __devinit puv3_rtc_probe(struct platform_device *pdev)
240{ 240{
241 struct rtc_device *rtc; 241 struct rtc_device *rtc;
242 struct resource *res; 242 struct resource *res;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7639ab906f02..5b979d9cc332 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
202 void __iomem *base = s3c_rtc_base; 202 void __iomem *base = s3c_rtc_base;
203 int year = tm->tm_year - 100; 203 int year = tm->tm_year - 100;
204 204
205 clk_enable(rtc_clk);
206 pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n", 205 pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
207 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, 206 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
208 tm->tm_hour, tm->tm_min, tm->tm_sec); 207 tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
214 return -EINVAL; 213 return -EINVAL;
215 } 214 }
216 215
216 clk_enable(rtc_clk);
217 writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC); 217 writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC);
218 writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN); 218 writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN);
219 writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR); 219 writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 43068fbd0baa..1b6d9247fdc7 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -641,6 +641,8 @@ static int __init zcore_init(void)
641 641
642 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 642 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
643 return -ENODATA; 643 return -ENODATA;
644 if (OLDMEM_BASE)
645 return -ENODATA;
644 646
645 zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long)); 647 zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
646 debug_register_view(zcore_dbf, &debug_sprintf_view); 648 debug_register_view(zcore_dbf, &debug_sprintf_view);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 75c3f1f8fd43..a84631a7391d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
529int chsc_chp_vary(struct chp_id chpid, int on) 529int chsc_chp_vary(struct chp_id chpid, int on)
530{ 530{
531 struct channel_path *chp = chpid_to_chp(chpid); 531 struct channel_path *chp = chpid_to_chp(chpid);
532 struct chp_link link;
533 532
534 memset(&link, 0, sizeof(struct chp_link));
535 link.chpid = chpid;
536 /* Wait until previous actions have settled. */ 533 /* Wait until previous actions have settled. */
537 css_wait_for_slow_path(); 534 css_wait_for_slow_path();
538 /* 535 /*
@@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
542 /* Try to update the channel path descritor. */ 539 /* Try to update the channel path descritor. */
543 chsc_determine_base_channel_path_desc(chpid, &chp->desc); 540 chsc_determine_base_channel_path_desc(chpid, &chp->desc);
544 for_each_subchannel_staged(s390_subchannel_vary_chpid_on, 541 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
545 __s390_vary_chpid_on, &link); 542 __s390_vary_chpid_on, &chpid);
546 } else 543 } else
547 for_each_subchannel_staged(s390_subchannel_vary_chpid_off, 544 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
548 NULL, &link); 545 NULL, &chpid);
549 546
550 return 0; 547 return 0;
551} 548}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 155a82bcb9e5..4a1ff5c2eb88 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,8 +68,13 @@ struct schib {
68 __u8 mda[4]; /* model dependent area */ 68 __u8 mda[4]; /* model dependent area */
69} __attribute__ ((packed,aligned(4))); 69} __attribute__ ((packed,aligned(4)));
70 70
71/*
72 * When rescheduled, todo's with higher values will overwrite those
73 * with lower values.
74 */
71enum sch_todo { 75enum sch_todo {
72 SCH_TODO_NOTHING, 76 SCH_TODO_NOTHING,
77 SCH_TODO_EVAL,
73 SCH_TODO_UNREG, 78 SCH_TODO_UNREG,
74}; 79};
75 80
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92d7324acb1c..21908e67bf67 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
195} 195}
196EXPORT_SYMBOL_GPL(css_sch_device_unregister); 196EXPORT_SYMBOL_GPL(css_sch_device_unregister);
197 197
198static void css_sch_todo(struct work_struct *work)
199{
200 struct subchannel *sch;
201 enum sch_todo todo;
202
203 sch = container_of(work, struct subchannel, todo_work);
204 /* Find out todo. */
205 spin_lock_irq(sch->lock);
206 todo = sch->todo;
207 CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
208 sch->schid.sch_no, todo);
209 sch->todo = SCH_TODO_NOTHING;
210 spin_unlock_irq(sch->lock);
211 /* Perform todo. */
212 if (todo == SCH_TODO_UNREG)
213 css_sch_device_unregister(sch);
214 /* Release workqueue ref. */
215 put_device(&sch->dev);
216}
217
218/**
219 * css_sched_sch_todo - schedule a subchannel operation
220 * @sch: subchannel
221 * @todo: todo
222 *
223 * Schedule the operation identified by @todo to be performed on the slow path
224 * workqueue. Do nothing if another operation with higher priority is already
225 * scheduled. Needs to be called with subchannel lock held.
226 */
227void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
228{
229 CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
230 sch->schid.ssid, sch->schid.sch_no, todo);
231 if (sch->todo >= todo)
232 return;
233 /* Get workqueue ref. */
234 if (!get_device(&sch->dev))
235 return;
236 sch->todo = todo;
237 if (!queue_work(cio_work_q, &sch->todo_work)) {
238 /* Already queued, release workqueue ref. */
239 put_device(&sch->dev);
240 }
241}
242
243static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) 198static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
244{ 199{
245 int i; 200 int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
466 css_schedule_eval(schid); 421 css_schedule_eval(schid);
467} 422}
468 423
424/**
425 * css_sched_sch_todo - schedule a subchannel operation
426 * @sch: subchannel
427 * @todo: todo
428 *
429 * Schedule the operation identified by @todo to be performed on the slow path
430 * workqueue. Do nothing if another operation with higher priority is already
431 * scheduled. Needs to be called with subchannel lock held.
432 */
433void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
434{
435 CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
436 sch->schid.ssid, sch->schid.sch_no, todo);
437 if (sch->todo >= todo)
438 return;
439 /* Get workqueue ref. */
440 if (!get_device(&sch->dev))
441 return;
442 sch->todo = todo;
443 if (!queue_work(cio_work_q, &sch->todo_work)) {
444 /* Already queued, release workqueue ref. */
445 put_device(&sch->dev);
446 }
447}
448
449static void css_sch_todo(struct work_struct *work)
450{
451 struct subchannel *sch;
452 enum sch_todo todo;
453 int ret;
454
455 sch = container_of(work, struct subchannel, todo_work);
456 /* Find out todo. */
457 spin_lock_irq(sch->lock);
458 todo = sch->todo;
459 CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
460 sch->schid.sch_no, todo);
461 sch->todo = SCH_TODO_NOTHING;
462 spin_unlock_irq(sch->lock);
463 /* Perform todo. */
464 switch (todo) {
465 case SCH_TODO_NOTHING:
466 break;
467 case SCH_TODO_EVAL:
468 ret = css_evaluate_known_subchannel(sch, 1);
469 if (ret == -EAGAIN) {
470 spin_lock_irq(sch->lock);
471 css_sched_sch_todo(sch, todo);
472 spin_unlock_irq(sch->lock);
473 }
474 break;
475 case SCH_TODO_UNREG:
476 css_sch_device_unregister(sch);
477 break;
478 }
479 /* Release workqueue ref. */
480 put_device(&sch->dev);
481}
482
469static struct idset *slow_subchannel_set; 483static struct idset *slow_subchannel_set;
470static spinlock_t slow_subchannel_lock; 484static spinlock_t slow_subchannel_lock;
471static wait_queue_head_t css_eval_wq; 485static wait_queue_head_t css_eval_wq;
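The css.c change introduces SCH_TODO_EVAL and moves css_sched_sch_todo()/css_sch_todo() below the subchannel-evaluation helpers, so the work function can call css_evaluate_known_subchannel() and, on -EAGAIN, re-arm itself. The new enum comment spells out the scheduling rule: a pending todo is only ever upgraded, never downgraded. A small user-space model of that rule (illustrative only, not the cio code):

#include <stdio.h>

enum sch_todo {                 /* higher values override lower ones */
        SCH_TODO_NOTHING,
        SCH_TODO_EVAL,
        SCH_TODO_UNREG,
};

static enum sch_todo pending = SCH_TODO_NOTHING;

/* Mirrors css_sched_sch_todo(): ignore requests that do not raise priority. */
static void sched_todo(enum sch_todo todo)
{
        if (pending >= todo)
                return;
        pending = todo;
}

int main(void)
{
        sched_todo(SCH_TODO_EVAL);      /* pending: EVAL                   */
        sched_todo(SCH_TODO_NOTHING);   /* ignored, would be a downgrade   */
        sched_todo(SCH_TODO_UNREG);     /* pending: UNREG overrides EVAL   */
        printf("pending todo = %d\n", pending);
        return 0;
}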
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d734f4a0ecac..47269858ecb6 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
1868 */ 1868 */
1869 cdev->private->flags.resuming = 1; 1869 cdev->private->flags.resuming = 1;
1870 cdev->private->path_new_mask = LPM_ANYPATH; 1870 cdev->private->path_new_mask = LPM_ANYPATH;
1871 css_schedule_eval(sch->schid); 1871 css_sched_sch_todo(sch, SCH_TODO_EVAL);
1872 spin_unlock_irq(sch->lock); 1872 spin_unlock_irq(sch->lock);
1873 css_complete_work(); 1873 css_wait_for_slow_path();
1874 1874
1875 /* cdev may have been moved to a different subchannel. */ 1875 /* cdev may have been moved to a different subchannel. */
1876 sch = to_subchannel(cdev->dev.parent); 1876 sch = to_subchannel(cdev->dev.parent);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 52c233fa2b12..1b853513c891 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev)
496 cdev->private->pgid_reset_mask = 0; 496 cdev->private->pgid_reset_mask = 0;
497} 497}
498 498
499void 499static void create_fake_irb(struct irb *irb, int type)
500ccw_device_verify_done(struct ccw_device *cdev, int err) 500{
501 memset(irb, 0, sizeof(*irb));
502 if (type == FAKE_CMD_IRB) {
503 struct cmd_scsw *scsw = &irb->scsw.cmd;
504 scsw->cc = 1;
505 scsw->fctl = SCSW_FCTL_START_FUNC;
506 scsw->actl = SCSW_ACTL_START_PEND;
507 scsw->stctl = SCSW_STCTL_STATUS_PEND;
508 } else if (type == FAKE_TM_IRB) {
509 struct tm_scsw *scsw = &irb->scsw.tm;
510 scsw->x = 1;
511 scsw->cc = 1;
512 scsw->fctl = SCSW_FCTL_START_FUNC;
513 scsw->actl = SCSW_ACTL_START_PEND;
514 scsw->stctl = SCSW_STCTL_STATUS_PEND;
515 }
516}
517
518void ccw_device_verify_done(struct ccw_device *cdev, int err)
501{ 519{
502 struct subchannel *sch; 520 struct subchannel *sch;
503 521
@@ -520,12 +538,8 @@ callback:
520 ccw_device_done(cdev, DEV_STATE_ONLINE); 538 ccw_device_done(cdev, DEV_STATE_ONLINE);
521 /* Deliver fake irb to device driver, if needed. */ 539 /* Deliver fake irb to device driver, if needed. */
522 if (cdev->private->flags.fake_irb) { 540 if (cdev->private->flags.fake_irb) {
523 memset(&cdev->private->irb, 0, sizeof(struct irb)); 541 create_fake_irb(&cdev->private->irb,
524 cdev->private->irb.scsw.cmd.cc = 1; 542 cdev->private->flags.fake_irb);
525 cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
526 cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
527 cdev->private->irb.scsw.cmd.stctl =
528 SCSW_STCTL_STATUS_PEND;
529 cdev->private->flags.fake_irb = 0; 543 cdev->private->flags.fake_irb = 0;
530 if (cdev->handler) 544 if (cdev->handler)
531 cdev->handler(cdev, cdev->private->intparm, 545 cdev->handler(cdev, cdev->private->intparm,
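In device_fsm.c the open-coded fake command IRB is folded into create_fake_irb(), which also learns a transport-mode variant selected by FAKE_TM_IRB. The sketch below only models the shape of that helper: the structures and bit values are simplified stand-ins, not the real scsw layout.

#include <stdio.h>
#include <string.h>

#define FAKE_CMD_IRB 1
#define FAKE_TM_IRB  2

/* Simplified stand-ins; field names only loosely follow the real scsw. */
struct fake_scsw { int x, cc, fctl, actl, stctl; };
struct fake_irb  { struct fake_scsw scsw; };

static void create_fake_irb(struct fake_irb *irb, int type)
{
        memset(irb, 0, sizeof(*irb));
        irb->scsw.cc    = 1;
        irb->scsw.fctl  = 1;     /* placeholder for SCSW_FCTL_START_FUNC   */
        irb->scsw.actl  = 1;     /* placeholder for SCSW_ACTL_START_PEND   */
        irb->scsw.stctl = 1;     /* placeholder for SCSW_STCTL_STATUS_PEND */
        if (type == FAKE_TM_IRB)
                irb->scsw.x = 1; /* only the transport-mode variant sets this */
}

int main(void)
{
        struct fake_irb cmd, tm;

        create_fake_irb(&cmd, FAKE_CMD_IRB);
        create_fake_irb(&tm, FAKE_TM_IRB);
        printf("cmd.x=%d tm.x=%d\n", cmd.scsw.x, tm.scsw.x);
        return 0;
}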
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f98698d5735e..ec7fb6d3b479 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
198 if (cdev->private->state == DEV_STATE_VERIFY) { 198 if (cdev->private->state == DEV_STATE_VERIFY) {
199 /* Remember to fake irb when finished. */ 199 /* Remember to fake irb when finished. */
200 if (!cdev->private->flags.fake_irb) { 200 if (!cdev->private->flags.fake_irb) {
201 cdev->private->flags.fake_irb = 1; 201 cdev->private->flags.fake_irb = FAKE_CMD_IRB;
202 cdev->private->intparm = intparm; 202 cdev->private->intparm = intparm;
203 return 0; 203 return 0;
204 } else 204 } else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
213 ret = cio_set_options (sch, flags); 213 ret = cio_set_options (sch, flags);
214 if (ret) 214 if (ret)
215 return ret; 215 return ret;
216 /* Adjust requested path mask to excluded varied off paths. */ 216 /* Adjust requested path mask to exclude unusable paths. */
217 if (lpm) { 217 if (lpm) {
218 lpm &= sch->opm; 218 lpm &= sch->lpm;
219 if (lpm == 0) 219 if (lpm == 0)
220 return -EACCES; 220 return -EACCES;
221 } 221 }
@@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
605 sch = to_subchannel(cdev->dev.parent); 605 sch = to_subchannel(cdev->dev.parent);
606 if (!sch->schib.pmcw.ena) 606 if (!sch->schib.pmcw.ena)
607 return -EINVAL; 607 return -EINVAL;
608 if (cdev->private->state == DEV_STATE_VERIFY) {
609 /* Remember to fake irb when finished. */
610 if (!cdev->private->flags.fake_irb) {
611 cdev->private->flags.fake_irb = FAKE_TM_IRB;
612 cdev->private->intparm = intparm;
613 return 0;
614 } else
615 /* There's already a fake I/O around. */
616 return -EBUSY;
617 }
608 if (cdev->private->state != DEV_STATE_ONLINE) 618 if (cdev->private->state != DEV_STATE_ONLINE)
609 return -EIO; 619 return -EIO;
610 /* Adjust requested path mask to excluded varied off paths. */ 620 /* Adjust requested path mask to exclude unusable paths. */
611 if (lpm) { 621 if (lpm) {
612 lpm &= sch->opm; 622 lpm &= sch->lpm;
613 if (lpm == 0) 623 if (lpm == 0)
614 return -EACCES; 624 return -EACCES;
615 } 625 }
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 2ebb492a5c17..76253dfcc1be 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -111,6 +111,9 @@ enum cdev_todo {
111 CDEV_TODO_UNREG_EVAL, 111 CDEV_TODO_UNREG_EVAL,
112}; 112};
113 113
114#define FAKE_CMD_IRB 1
115#define FAKE_TM_IRB 2
116
114struct ccw_device_private { 117struct ccw_device_private {
115 struct ccw_device *cdev; 118 struct ccw_device *cdev;
116 struct subchannel *sch; 119 struct subchannel *sch;
@@ -138,7 +141,7 @@ struct ccw_device_private {
138 unsigned int doverify:1; /* delayed path verification */ 141 unsigned int doverify:1; /* delayed path verification */
139 unsigned int donotify:1; /* call notify function */ 142 unsigned int donotify:1; /* call notify function */
140 unsigned int recog_done:1; /* dev. recog. complete */ 143 unsigned int recog_done:1; /* dev. recog. complete */
141 unsigned int fake_irb:1; /* deliver faked irb */ 144 unsigned int fake_irb:2; /* deliver faked irb */
142 unsigned int resuming:1; /* recognition while resume */ 145 unsigned int resuming:1; /* recognition while resume */
143 unsigned int pgroup:1; /* pathgroup is set up */ 146 unsigned int pgroup:1; /* pathgroup is set up */
144 unsigned int mpath:1; /* multipathing is set up */ 147 unsigned int mpath:1; /* multipathing is set up */
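The io_sch.h hunk widens fake_irb from one bit to two so it can record which fake IRB to build, FAKE_CMD_IRB (1) or FAKE_TM_IRB (2); a single-bit field would truncate the value 2 to 0 and the transport-mode request would silently be lost. A stand-alone illustration:

#include <stdio.h>

#define FAKE_CMD_IRB 1
#define FAKE_TM_IRB  2

struct old_priv { unsigned int fake_irb:1; };   /* before: boolean only      */
struct new_priv { unsigned int fake_irb:2; };   /* after: can hold 0, 1 or 2 */

int main(void)
{
        unsigned int want = FAKE_TM_IRB;
        struct old_priv o = { 0 };
        struct new_priv n = { 0 };

        o.fake_irb = want;   /* truncated to 0: the TM request is lost */
        n.fake_irb = want;   /* kept as 2                              */
        printf("1-bit field: %u, 2-bit field: %u\n", o.fake_irb, n.fake_irb);
        return 0;
}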
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index b77ae519d79c..96bbe9d12a79 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1271,18 +1271,16 @@ ap_config_timeout(unsigned long ptr)
1271} 1271}
1272 1272
1273/** 1273/**
1274 * ap_schedule_poll_timer(): Schedule poll timer. 1274 * __ap_schedule_poll_timer(): Schedule poll timer.
1275 * 1275 *
1276 * Set up the timer to run the poll tasklet 1276 * Set up the timer to run the poll tasklet
1277 */ 1277 */
1278static inline void ap_schedule_poll_timer(void) 1278static inline void __ap_schedule_poll_timer(void)
1279{ 1279{
1280 ktime_t hr_time; 1280 ktime_t hr_time;
1281 1281
1282 spin_lock_bh(&ap_poll_timer_lock); 1282 spin_lock_bh(&ap_poll_timer_lock);
1283 if (ap_using_interrupts() || ap_suspend_flag) 1283 if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
1284 goto out;
1285 if (hrtimer_is_queued(&ap_poll_timer))
1286 goto out; 1284 goto out;
1287 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) { 1285 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
1288 hr_time = ktime_set(0, poll_timeout); 1286 hr_time = ktime_set(0, poll_timeout);
@@ -1294,6 +1292,18 @@ out:
1294} 1292}
1295 1293
1296/** 1294/**
1295 * ap_schedule_poll_timer(): Schedule poll timer.
1296 *
1297 * Set up the timer to run the poll tasklet
1298 */
1299static inline void ap_schedule_poll_timer(void)
1300{
1301 if (ap_using_interrupts())
1302 return;
1303 __ap_schedule_poll_timer();
1304}
1305
1306/**
1297 * ap_poll_read(): Receive pending reply messages from an AP device. 1307 * ap_poll_read(): Receive pending reply messages from an AP device.
1298 * @ap_dev: pointer to the AP device 1308 * @ap_dev: pointer to the AP device
1299 * @flags: pointer to control flags, bit 2^0 is set if another poll is 1309 * @flags: pointer to control flags, bit 2^0 is set if another poll is
@@ -1374,8 +1384,9 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
1374 *flags |= 1; 1384 *flags |= 1;
1375 *flags |= 2; 1385 *flags |= 2;
1376 break; 1386 break;
1377 case AP_RESPONSE_Q_FULL:
1378 case AP_RESPONSE_RESET_IN_PROGRESS: 1387 case AP_RESPONSE_RESET_IN_PROGRESS:
1388 __ap_schedule_poll_timer();
1389 case AP_RESPONSE_Q_FULL:
1379 *flags |= 2; 1390 *flags |= 2;
1380 break; 1391 break;
1381 case AP_RESPONSE_MESSAGE_TOO_BIG: 1392 case AP_RESPONSE_MESSAGE_TOO_BIG:
@@ -1541,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev)
1541 rc = ap_init_queue(ap_dev->qid); 1552 rc = ap_init_queue(ap_dev->qid);
1542 if (rc == -ENODEV) 1553 if (rc == -ENODEV)
1543 ap_dev->unregistered = 1; 1554 ap_dev->unregistered = 1;
1555 else
1556 __ap_schedule_poll_timer();
1544} 1557}
1545 1558
1546static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) 1559static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
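ap_bus.c splits the poll-timer helper: __ap_schedule_poll_timer() always arms the hrtimer (skipping only if it is already queued or the bus is suspended), while ap_schedule_poll_timer() keeps the "not while interrupts work" shortcut. Reset handling and the RESET_IN_PROGRESS reply can therefore force a poll even on interrupt-driven queues, and RESET_IN_PROGRESS now deliberately falls through into the Q_FULL handling. A generic plain-C sketch of the wrapper/worker split, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

static bool timer_queued;
static bool using_interrupts = true;

/* Always arms the timer; callers that must poll use this directly. */
static void __schedule_poll_timer(void)
{
        if (timer_queued)
                return;
        timer_queued = true;
        puts("poll timer armed");
}

/* Normal path: polling is redundant while interrupts deliver replies. */
static void schedule_poll_timer(void)
{
        if (using_interrupts)
                return;
        __schedule_poll_timer();
}

int main(void)
{
        schedule_poll_timer();    /* skipped: interrupts are active        */
        __schedule_poll_timer();  /* forced: e.g. after a queue reset      */
        return 0;
}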
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index fa80ba1f0344..9b66d2d1809b 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -4,7 +4,7 @@ menu "S/390 network device drivers"
4config LCS 4config LCS
5 def_tristate m 5 def_tristate m
6 prompt "Lan Channel Station Interface" 6 prompt "Lan Channel Station Interface"
7 depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI) 7 depends on CCW && NETDEVICES && (ETHERNET || TR || FDDI)
8 help 8 help
9 Select this option if you want to use LCS networking on IBM System z. 9 Select this option if you want to use LCS networking on IBM System z.
10 This device driver supports Token Ring (IEEE 802.5), 10 This device driver supports Token Ring (IEEE 802.5),
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c28713da1ec5..863fc2197155 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -50,7 +50,7 @@
50#include "lcs.h" 50#include "lcs.h"
51 51
52 52
53#if !defined(CONFIG_NET_ETHERNET) && \ 53#if !defined(CONFIG_ETHERNET) && \
54 !defined(CONFIG_TR) && !defined(CONFIG_FDDI) 54 !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
55#error Cannot compile lcs.c without some net devices switched on. 55#error Cannot compile lcs.c without some net devices switched on.
56#endif 56#endif
@@ -1634,7 +1634,7 @@ lcs_startlan_auto(struct lcs_card *card)
1634 int rc; 1634 int rc;
1635 1635
1636 LCS_DBF_TEXT(2, trace, "strtauto"); 1636 LCS_DBF_TEXT(2, trace, "strtauto");
1637#ifdef CONFIG_NET_ETHERNET 1637#ifdef CONFIG_ETHERNET
1638 card->lan_type = LCS_FRAME_TYPE_ENET; 1638 card->lan_type = LCS_FRAME_TYPE_ENET;
1639 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); 1639 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1640 if (rc == 0) 1640 if (rc == 0)
@@ -2166,7 +2166,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2166 goto netdev_out; 2166 goto netdev_out;
2167 } 2167 }
2168 switch (card->lan_type) { 2168 switch (card->lan_type) {
2169#ifdef CONFIG_NET_ETHERNET 2169#ifdef CONFIG_ETHERNET
2170 case LCS_FRAME_TYPE_ENET: 2170 case LCS_FRAME_TYPE_ENET:
2171 card->lan_type_trans = eth_type_trans; 2171 card->lan_type_trans = eth_type_trans;
2172 dev = alloc_etherdev(0); 2172 dev = alloc_etherdev(0);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 3251333a23df..b6a6356d09b3 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1994,6 +1994,8 @@ static struct net_device *netiucv_init_netdevice(char *username)
1994 netiucv_setup_netdevice); 1994 netiucv_setup_netdevice);
1995 if (!dev) 1995 if (!dev)
1996 return NULL; 1996 return NULL;
1997 if (dev_alloc_name(dev, dev->name) < 0)
1998 goto out_netdev;
1997 1999
1998 privptr = netdev_priv(dev); 2000 privptr = netdev_priv(dev);
1999 privptr->fsm = init_fsm("netiucvdev", dev_state_names, 2001 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index b77c65ed1381..4abc79d3963f 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -236,8 +236,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
236#define QETH_IN_BUF_COUNT_MAX 128 236#define QETH_IN_BUF_COUNT_MAX 128
237#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12) 237#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
238#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \ 238#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
239 ((card)->ssqd.qdioac1 & AC1_SIGA_INPUT_NEEDED ? 1 : \ 239 ((card)->qdio.in_buf_pool.buf_count / 2)
240 ((card)->qdio.in_buf_pool.buf_count / 2))
241 240
242/* buffers we have to be behind before we get a PCI */ 241/* buffers we have to be behind before we get a PCI */
243#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1) 242#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 81534437373a..fff57de78943 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -881,7 +881,6 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread);
881void qeth_schedule_recovery(struct qeth_card *card) 881void qeth_schedule_recovery(struct qeth_card *card)
882{ 882{
883 QETH_CARD_TEXT(card, 2, "startrec"); 883 QETH_CARD_TEXT(card, 2, "startrec");
884 WARN_ON(1);
885 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) 884 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
886 schedule_work(&card->kernel_thread_starter); 885 schedule_work(&card->kernel_thread_starter);
887} 886}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e4c1176ee25b..4d5307ddbe55 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2756,11 +2756,13 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
2756 struct neighbour *n = NULL; 2756 struct neighbour *n = NULL;
2757 struct dst_entry *dst; 2757 struct dst_entry *dst;
2758 2758
2759 rcu_read_lock();
2759 dst = skb_dst(skb); 2760 dst = skb_dst(skb);
2760 if (dst) 2761 if (dst)
2761 n = dst_get_neighbour(dst); 2762 n = dst_get_neighbour(dst);
2762 if (n) { 2763 if (n) {
2763 cast_type = n->type; 2764 cast_type = n->type;
2765 rcu_read_unlock();
2764 if ((cast_type == RTN_BROADCAST) || 2766 if ((cast_type == RTN_BROADCAST) ||
2765 (cast_type == RTN_MULTICAST) || 2767 (cast_type == RTN_MULTICAST) ||
2766 (cast_type == RTN_ANYCAST)) 2768 (cast_type == RTN_ANYCAST))
@@ -2768,6 +2770,8 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
2768 else 2770 else
2769 return RTN_UNSPEC; 2771 return RTN_UNSPEC;
2770 } 2772 }
2773 rcu_read_unlock();
2774
2771 /* try something else */ 2775 /* try something else */
2772 if (skb->protocol == ETH_P_IPV6) 2776 if (skb->protocol == ETH_P_IPV6)
2773 return (skb_network_header(skb)[24] == 0xff) ? 2777 return (skb_network_header(skb)[24] == 0xff) ?
@@ -2847,6 +2851,8 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2847 } 2851 }
2848 2852
2849 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr); 2853 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
2854
2855 rcu_read_lock();
2850 dst = skb_dst(skb); 2856 dst = skb_dst(skb);
2851 if (dst) 2857 if (dst)
2852 n = dst_get_neighbour(dst); 2858 n = dst_get_neighbour(dst);
@@ -2893,6 +2899,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2893 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU; 2899 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
2894 } 2900 }
2895 } 2901 }
2902 rcu_read_unlock();
2896} 2903}
2897 2904
2898static inline void qeth_l3_hdr_csum(struct qeth_card *card, 2905static inline void qeth_l3_hdr_csum(struct qeth_card *card,
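The qeth_l3_main.c hunks wrap every dst_get_neighbour() lookup and each subsequent dereference of the neighbour in rcu_read_lock()/rcu_read_unlock(), since the neighbour pointer is only guaranteed to stay valid inside an RCU read-side section. A hedged kernel-style sketch of the pattern; the function name is a placeholder, not the qeth code:

#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/dst.h>

/* Illustrative only: read one field of the neighbour under the RCU lock. */
static int example_cast_type(struct sk_buff *skb)
{
        struct dst_entry *dst;
        struct neighbour *n;
        int type = -1;

        rcu_read_lock();                /* pin the neighbour for the lookup... */
        dst = skb_dst(skb);
        if (dst) {
                n = dst_get_neighbour(dst);
                if (n)
                        type = n->type; /* ...and for every dereference */
        }
        rcu_read_unlock();              /* only after the last use of n */

        return type;
}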
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 0ea2fbfe0e99..d979bb26522f 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -335,10 +335,10 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
335 QETH_IN_BUF_COUNT_MAX) 335 QETH_IN_BUF_COUNT_MAX)
336 qeth_realloc_buffer_pool(card, 336 qeth_realloc_buffer_pool(card,
337 QETH_IN_BUF_COUNT_MAX); 337 QETH_IN_BUF_COUNT_MAX);
338 break;
339 } else 338 } else
340 rc = -EPERM; 339 rc = -EPERM;
341 default: /* fall through */ 340 break;
341 default:
342 rc = -EINVAL; 342 rc = -EINVAL;
343 } 343 }
344out: 344out:
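qeth_l3_sys.c fixes the sniffer-enable case: the -EPERM branch previously fell straight through into default, where rc was overwritten with -EINVAL, so the patch ends the whole case with a single break. A tiny stand-alone illustration of why that break matters:

#include <stdio.h>

static int handle(int arg, int allowed)
{
        int rc = 0;

        switch (arg) {
        case 1:
                if (allowed)
                        rc = 100;       /* pretend work was done              */
                else
                        rc = -1;        /* -EPERM in the driver               */
                break;                  /* without this, rc becomes -22 below */
        default:
                rc = -22;               /* -EINVAL */
        }
        return rc;
}

int main(void)
{
        printf("allowed: %d, denied: %d, bad arg: %d\n",
               handle(1, 1), handle(1, 0), handle(2, 0));
        return 0;
}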
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 5f94d22c491e..542668292900 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -233,13 +233,9 @@ int bbc_i2c_write_buf(struct bbc_i2c_client *client,
233 int ret = 0; 233 int ret = 0;
234 234
235 while (len > 0) { 235 while (len > 0) {
236 int err = bbc_i2c_writeb(client, *buf, off); 236 ret = bbc_i2c_writeb(client, *buf, off);
237 237 if (ret < 0)
238 if (err < 0) {
239 ret = err;
240 break; 238 break;
241 }
242
243 len--; 239 len--;
244 buf++; 240 buf++;
245 off++; 241 off++;
@@ -253,11 +249,9 @@ int bbc_i2c_read_buf(struct bbc_i2c_client *client,
253 int ret = 0; 249 int ret = 0;
254 250
255 while (len > 0) { 251 while (len > 0) {
256 int err = bbc_i2c_readb(client, buf, off); 252 ret = bbc_i2c_readb(client, buf, off);
257 if (err < 0) { 253 if (ret < 0)
258 ret = err;
259 break; 254 break;
260 }
261 len--; 255 len--;
262 buf++; 256 buf++;
263 off++; 257 off++;
@@ -422,17 +416,6 @@ static struct platform_driver bbc_i2c_driver = {
422 .remove = __devexit_p(bbc_i2c_remove), 416 .remove = __devexit_p(bbc_i2c_remove),
423}; 417};
424 418
425static int __init bbc_i2c_init(void) 419module_platform_driver(bbc_i2c_driver);
426{
427 return platform_driver_register(&bbc_i2c_driver);
428}
429
430static void __exit bbc_i2c_exit(void)
431{
432 platform_driver_unregister(&bbc_i2c_driver);
433}
434
435module_init(bbc_i2c_init);
436module_exit(bbc_i2c_exit);
437 420
438MODULE_LICENSE("GPL"); 421MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 965a1fccd66a..4b9939726c34 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -275,15 +275,4 @@ static struct platform_driver d7s_driver = {
275 .remove = __devexit_p(d7s_remove), 275 .remove = __devexit_p(d7s_remove),
276}; 276};
277 277
278static int __init d7s_init(void) 278module_platform_driver(d7s_driver);
279{
280 return platform_driver_register(&d7s_driver);
281}
282
283static void __exit d7s_exit(void)
284{
285 platform_driver_unregister(&d7s_driver);
286}
287
288module_init(d7s_init);
289module_exit(d7s_exit);
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index be7b4e56154f..339fd6f65eda 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1138,16 +1138,6 @@ static struct platform_driver envctrl_driver = {
1138 .remove = __devexit_p(envctrl_remove), 1138 .remove = __devexit_p(envctrl_remove),
1139}; 1139};
1140 1140
1141static int __init envctrl_init(void) 1141module_platform_driver(envctrl_driver);
1142{
1143 return platform_driver_register(&envctrl_driver);
1144}
1145
1146static void __exit envctrl_exit(void)
1147{
1148 platform_driver_unregister(&envctrl_driver);
1149}
1150 1142
1151module_init(envctrl_init);
1152module_exit(envctrl_exit);
1153MODULE_LICENSE("GPL"); 1143MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 73dd4e7afaaa..826157f38694 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -216,16 +216,6 @@ static struct platform_driver flash_driver = {
216 .remove = __devexit_p(flash_remove), 216 .remove = __devexit_p(flash_remove),
217}; 217};
218 218
219static int __init flash_init(void) 219module_platform_driver(flash_driver);
220{
221 return platform_driver_register(&flash_driver);
222}
223
224static void __exit flash_cleanup(void)
225{
226 platform_driver_unregister(&flash_driver);
227}
228 220
229module_init(flash_init);
230module_exit(flash_cleanup);
231MODULE_LICENSE("GPL"); 221MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index ebce9639a26a..0b31658ccde5 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -435,16 +435,6 @@ static struct platform_driver uctrl_driver = {
435}; 435};
436 436
437 437
438static int __init uctrl_init(void) 438module_platform_driver(uctrl_driver);
439{
440 return platform_driver_register(&uctrl_driver);
441}
442
443static void __exit uctrl_exit(void)
444{
445 platform_driver_unregister(&uctrl_driver);
446}
447 439
448module_init(uctrl_init);
449module_exit(uctrl_exit);
450MODULE_LICENSE("GPL"); 440MODULE_LICENSE("GPL");
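The five sbus char drivers above (bbc_i2c, display7seg, envctrl, flash and uctrl) all replace their hand-rolled init/exit pairs with module_platform_driver(), which expands to exactly that register/unregister boilerplate. A hedged before/after sketch for a hypothetical driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver example_driver = {
        .driver = { .name = "example" },
        /* .probe / .remove would go here */
};

#ifdef OPEN_CODED_VERSION
/* What each of these drivers used to spell out by hand: */
static int __init example_init(void)
{
        return platform_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
        platform_driver_unregister(&example_driver);
}
module_exit(example_exit);
#else
/* One line generates the same init/exit pair: */
module_platform_driver(example_driver);
#endif

MODULE_LICENSE("GPL");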
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4aa76d6f11df..705e13e470af 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -38,6 +38,7 @@
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/moduleparam.h> 39#include <linux/moduleparam.h>
40#include <linux/pci.h> 40#include <linux/pci.h>
41#include <linux/pci-aspm.h>
41#include <linux/slab.h> 42#include <linux/slab.h>
42#include <linux/mutex.h> 43#include <linux/mutex.h>
43#include <linux/spinlock.h> 44#include <linux/spinlock.h>
@@ -1109,6 +1110,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1109 unique_id++; 1110 unique_id++;
1110 } 1111 }
1111 1112
1113 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
1114 PCIE_LINK_STATE_CLKPM);
1115
1112 error = pci_enable_device(pdev); 1116 error = pci_enable_device(pdev);
1113 if (error) 1117 if (error)
1114 goto out; 1118 goto out;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index e76107b2ade3..865d452542be 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -23,6 +23,7 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <linux/pci-aspm.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/delay.h> 29#include <linux/delay.h>
@@ -3922,6 +3923,10 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
3922 dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); 3923 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
3923 return -ENODEV; 3924 return -ENODEV;
3924 } 3925 }
3926
3927 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
3928 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
3929
3925 err = pci_enable_device(h->pdev); 3930 err = pci_enable_device(h->pdev);
3926 if (err) { 3931 if (err) {
3927 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); 3932 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 8889b1babcac..4e041f6d808c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2802,6 +2802,11 @@ _scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc)
2802 2802
2803 if (ioc->is_driver_loading) 2803 if (ioc->is_driver_loading)
2804 return; 2804 return;
2805
2806 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
2807 if (!fw_event)
2808 return;
2809
2805 fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES; 2810 fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES;
2806 fw_event->ioc = ioc; 2811 fw_event->ioc = ioc;
2807 _scsih_fw_event_add(ioc, fw_event); 2812 _scsih_fw_event_add(ioc, fw_event);
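The mpt2sas hunk allocates fw_event with GFP_ATOMIC and bails out on failure before the structure is filled in and queued. A generic sketch of that allocate-check-fill pattern using a stand-in structure (not the driver's real fw_event_work):

#include <linux/slab.h>

struct fw_event_example {           /* stand-in for the driver's fw_event_work */
        int event;
        void *ioc;
};

/* Illustrative: allocate atomically, and never touch the memory on failure. */
static void queue_recovery_event(void *ioc, int event_code)
{
        struct fw_event_example *fw_event;

        fw_event = kzalloc(sizeof(*fw_event), GFP_ATOMIC);
        if (!fw_event)
                return;             /* dropping the event beats a NULL deref */

        fw_event->event = event_code;
        fw_event->ioc = ioc;
        /* the real code would hand fw_event to _scsih_fw_event_add() here */
}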
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 06bc26554a67..f85cfa6c47b5 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1409,6 +1409,8 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1409 1409
1410 blk_start_request(req); 1410 blk_start_request(req);
1411 1411
1412 scmd_printk(KERN_INFO, cmd, "killing request\n");
1413
1412 sdev = cmd->device; 1414 sdev = cmd->device;
1413 starget = scsi_target(sdev); 1415 starget = scsi_target(sdev);
1414 shost = sdev->host; 1416 shost = sdev->host;
@@ -1490,7 +1492,6 @@ static void scsi_request_fn(struct request_queue *q)
1490 struct request *req; 1492 struct request *req;
1491 1493
1492 if (!sdev) { 1494 if (!sdev) {
1493 printk("scsi: killing requests for dead queue\n");
1494 while ((req = blk_peek_request(q)) != NULL) 1495 while ((req = blk_peek_request(q)) != NULL)
1495 scsi_kill_request(req, q); 1496 scsi_kill_request(req, q);
1496 return; 1497 return;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 72273a0e5666..b3c6d957fbd8 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -319,11 +319,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
319 return sdev; 319 return sdev;
320 320
321out_device_destroy: 321out_device_destroy:
322 scsi_device_set_state(sdev, SDEV_DEL); 322 __scsi_remove_device(sdev);
323 transport_destroy_device(&sdev->sdev_gendev);
324 put_device(&sdev->sdev_dev);
325 scsi_free_queue(sdev->request_queue);
326 put_device(&sdev->sdev_gendev);
327out: 323out:
328 if (display_failure_msg) 324 if (display_failure_msg)
329 printk(ALLOC_FAILURE_MSG, __func__); 325 printk(ALLOC_FAILURE_MSG, __func__);
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 24e6cec0ae8d..67e272ab1623 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -7,3 +7,11 @@ obj-$(CONFIG_HAVE_CLK) += clk/
7obj-$(CONFIG_MAPLE) += maple/ 7obj-$(CONFIG_MAPLE) += maple/
8obj-$(CONFIG_SUPERHYWAY) += superhyway/ 8obj-$(CONFIG_SUPERHYWAY) += superhyway/
9obj-$(CONFIG_GENERIC_GPIO) += pfc.o 9obj-$(CONFIG_GENERIC_GPIO) += pfc.o
10
11#
12# For the moment we only use this framework for ARM-based SH/R-Mobile
13# platforms and generic SH. SH-based SH-Mobile platforms are still using
14# an older framework that is pending up-porting, at which point this
15# special casing can go away.
16#
17obj-$(CONFIG_SUPERH)$(CONFIG_ARCH_SHMOBILE) += pm_runtime.o
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index dc8d022c07a1..db257a35e71a 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -25,7 +25,6 @@
25#include <linux/seq_file.h> 25#include <linux/seq_file.h>
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/io.h> 27#include <linux/io.h>
28#include <linux/debugfs.h>
29#include <linux/cpufreq.h> 28#include <linux/cpufreq.h>
30#include <linux/clk.h> 29#include <linux/clk.h>
31#include <linux/sh_clk.h> 30#include <linux/sh_clk.h>
@@ -173,6 +172,26 @@ long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
173 return clk_rate_round_helper(&div_range_round); 172 return clk_rate_round_helper(&div_range_round);
174} 173}
175 174
175static long clk_rate_mult_range_iter(unsigned int pos,
176 struct clk_rate_round_data *rounder)
177{
178 return clk_get_rate(rounder->arg) * pos;
179}
180
181long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
182 unsigned int mult_max, unsigned long rate)
183{
184 struct clk_rate_round_data mult_range_round = {
185 .min = mult_min,
186 .max = mult_max,
187 .func = clk_rate_mult_range_iter,
188 .arg = clk_get_parent(clk),
189 .rate = rate,
190 };
191
192 return clk_rate_round_helper(&mult_range_round);
193}
194
176int clk_rate_table_find(struct clk *clk, 195int clk_rate_table_find(struct clk *clk,
177 struct cpufreq_frequency_table *freq_table, 196 struct cpufreq_frequency_table *freq_table,
178 unsigned long rate) 197 unsigned long rate)
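clk_rate_mult_range_round() is the multiplier counterpart of the existing divider helper: candidate rates are parent_rate * pos for pos in [mult_min, mult_max], and clk_rate_round_helper() picks the entry that best satisfies the requested rate. A plain-C model of the idea; the exact rounding policy lives in the shared kernel helper, this sketch simply picks the closest candidate:

#include <stdio.h>

static unsigned long mult_range_round(unsigned long parent, unsigned int min,
                                      unsigned int max, unsigned long target)
{
        unsigned long best = parent * min;
        unsigned long best_diff = (unsigned long)-1;

        for (unsigned int m = min; m <= max; m++) {
                unsigned long rate = parent * m;
                unsigned long diff = rate > target ? rate - target : target - rate;

                if (diff < best_diff) {   /* keep the candidate nearest to target */
                        best_diff = diff;
                        best = rate;
                }
        }
        return best;
}

int main(void)
{
        /* e.g. a 32 kHz parent multiplied up towards ~1 MHz */
        printf("%lu\n", mult_range_round(32768, 1, 64, 1000000));
        return 0;
}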
@@ -205,9 +224,6 @@ int clk_reparent(struct clk *child, struct clk *parent)
205 list_add(&child->sibling, &parent->children); 224 list_add(&child->sibling, &parent->children);
206 child->parent = parent; 225 child->parent = parent;
207 226
208 /* now do the debugfs renaming to reattach the child
209 to the proper parent */
210
211 return 0; 227 return 0;
212} 228}
213 229
@@ -665,89 +681,6 @@ static int __init clk_syscore_init(void)
665subsys_initcall(clk_syscore_init); 681subsys_initcall(clk_syscore_init);
666#endif 682#endif
667 683
668/*
669 * debugfs support to trace clock tree hierarchy and attributes
670 */
671static struct dentry *clk_debugfs_root;
672
673static int clk_debugfs_register_one(struct clk *c)
674{
675 int err;
676 struct dentry *d;
677 struct clk *pa = c->parent;
678 char s[255];
679 char *p = s;
680
681 p += sprintf(p, "%p", c);
682 d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
683 if (!d)
684 return -ENOMEM;
685 c->dentry = d;
686
687 d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
688 if (!d) {
689 err = -ENOMEM;
690 goto err_out;
691 }
692 d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
693 if (!d) {
694 err = -ENOMEM;
695 goto err_out;
696 }
697 d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
698 if (!d) {
699 err = -ENOMEM;
700 goto err_out;
701 }
702 return 0;
703
704err_out:
705 debugfs_remove_recursive(c->dentry);
706 return err;
707}
708
709static int clk_debugfs_register(struct clk *c)
710{
711 int err;
712 struct clk *pa = c->parent;
713
714 if (pa && !pa->dentry) {
715 err = clk_debugfs_register(pa);
716 if (err)
717 return err;
718 }
719
720 if (!c->dentry) {
721 err = clk_debugfs_register_one(c);
722 if (err)
723 return err;
724 }
725 return 0;
726}
727
728static int __init clk_debugfs_init(void)
729{
730 struct clk *c;
731 struct dentry *d;
732 int err;
733
734 d = debugfs_create_dir("clock", NULL);
735 if (!d)
736 return -ENOMEM;
737 clk_debugfs_root = d;
738
739 list_for_each_entry(c, &clock_list, node) {
740 err = clk_debugfs_register(c);
741 if (err)
742 goto err_out;
743 }
744 return 0;
745err_out:
746 debugfs_remove_recursive(clk_debugfs_root);
747 return err;
748}
749late_initcall(clk_debugfs_init);
750
751static int __init clk_late_init(void) 684static int __init clk_late_init(void)
752{ 685{
753 unsigned long flags; 686 unsigned long flags;
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
new file mode 100644
index 000000000000..afe9282629b9
--- /dev/null
+++ b/drivers/sh/pm_runtime.c
@@ -0,0 +1,65 @@
1/*
2 * Runtime PM support code
3 *
4 * Copyright (C) 2009-2010 Magnus Damm
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/io.h>
14#include <linux/pm_runtime.h>
15#include <linux/pm_domain.h>
16#include <linux/pm_clock.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/sh_clk.h>
20#include <linux/bitmap.h>
21#include <linux/slab.h>
22
23#ifdef CONFIG_PM_RUNTIME
24
25static int default_platform_runtime_idle(struct device *dev)
26{
27 /* suspend synchronously to disable clocks immediately */
28 return pm_runtime_suspend(dev);
29}
30
31static struct dev_pm_domain default_pm_domain = {
32 .ops = {
33 .runtime_suspend = pm_clk_suspend,
34 .runtime_resume = pm_clk_resume,
35 .runtime_idle = default_platform_runtime_idle,
36 USE_PLATFORM_PM_SLEEP_OPS
37 },
38};
39
40#define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain)
41
42#else
43
44#define DEFAULT_PM_DOMAIN_PTR NULL
45
46#endif /* CONFIG_PM_RUNTIME */
47
48static struct pm_clk_notifier_block platform_bus_notifier = {
49 .pm_domain = DEFAULT_PM_DOMAIN_PTR,
50 .con_ids = { NULL, },
51};
52
53static int __init sh_pm_runtime_init(void)
54{
55 pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
56 return 0;
57}
58core_initcall(sh_pm_runtime_init);
59
60static int __init sh_pm_runtime_late_init(void)
61{
62 pm_genpd_poweroff_unused();
63 return 0;
64}
65late_initcall(sh_pm_runtime_late_init);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a1fd73df5416..8ba4510a9519 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -199,7 +199,7 @@ config SPI_FSL_LIB
199 depends on FSL_SOC 199 depends on FSL_SOC
200 200
201config SPI_FSL_SPI 201config SPI_FSL_SPI
202 tristate "Freescale SPI controller" 202 bool "Freescale SPI controller"
203 depends on FSL_SOC 203 depends on FSL_SOC
204 select SPI_FSL_LIB 204 select SPI_FSL_LIB
205 help 205 help
@@ -208,7 +208,7 @@ config SPI_FSL_SPI
208 MPC8569 uses the controller in QE mode, MPC8610 in cpu mode. 208 MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
209 209
210config SPI_FSL_ESPI 210config SPI_FSL_ESPI
211 tristate "Freescale eSPI controller" 211 bool "Freescale eSPI controller"
212 depends on FSL_SOC 212 depends on FSL_SOC
213 select SPI_FSL_LIB 213 select SPI_FSL_LIB
214 help 214 help
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 024b48aed5ca..acc88b4d2869 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/module.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/delay.h> 18#include <linux/delay.h>
18#include <linux/spinlock.h> 19#include <linux/spinlock.h>
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 79665e2e6ec5..16d6a839c7fa 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -907,7 +907,7 @@ static void atmel_spi_cleanup(struct spi_device *spi)
907 907
908/*-------------------------------------------------------------------------*/ 908/*-------------------------------------------------------------------------*/
909 909
910static int __init atmel_spi_probe(struct platform_device *pdev) 910static int __devinit atmel_spi_probe(struct platform_device *pdev)
911{ 911{
912 struct resource *regs; 912 struct resource *regs;
913 int irq; 913 int irq;
@@ -1003,7 +1003,7 @@ out_free:
1003 return ret; 1003 return ret;
1004} 1004}
1005 1005
1006static int __exit atmel_spi_remove(struct platform_device *pdev) 1006static int __devexit atmel_spi_remove(struct platform_device *pdev)
1007{ 1007{
1008 struct spi_master *master = platform_get_drvdata(pdev); 1008 struct spi_master *master = platform_get_drvdata(pdev);
1009 struct atmel_spi *as = spi_master_get_devdata(master); 1009 struct atmel_spi *as = spi_master_get_devdata(master);
@@ -1072,6 +1072,7 @@ static struct platform_driver atmel_spi_driver = {
1072 }, 1072 },
1073 .suspend = atmel_spi_suspend, 1073 .suspend = atmel_spi_suspend,
1074 .resume = atmel_spi_resume, 1074 .resume = atmel_spi_resume,
1075 .probe = atmel_spi_probe,
1075 .remove = __exit_p(atmel_spi_remove), 1076 .remove = __exit_p(atmel_spi_remove),
1076}; 1077};
1077module_platform_driver(atmel_spi_driver); 1078module_platform_driver(atmel_spi_driver);
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index e093d3ec41ba..0094c645ff0d 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
256 spi_bitbang_cleanup(spi); 256 spi_bitbang_cleanup(spi);
257} 257}
258 258
259static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in) 259static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
260{ 260{
261 int value; 261 int value;
262 262
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
270 return value; 270 return value;
271} 271}
272 272
273static int __init 273static int __devinit
274spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label, 274spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
275 u16 *res_flags) 275 u16 *res_flags)
276{ 276{
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index e763254741c2..182e9c873822 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -8,6 +8,7 @@
8 * 8 *
9 */ 9 */
10 10
11#include <linux/module.h>
11#include <linux/init.h> 12#include <linux/init.h>
12#include <linux/spinlock.h> 13#include <linux/spinlock.h>
13#include <linux/workqueue.h> 14#include <linux/workqueue.h>
@@ -426,7 +427,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
426 goto err_clk; 427 goto err_clk;
427 } 428 }
428 429
429 mfp_set_groupg(&pdev->dev); 430 mfp_set_groupg(&pdev->dev, NULL);
430 nuc900_init_spi(hw); 431 nuc900_init_spi(hw);
431 432
432 err = spi_bitbang_start(&hw->bitbang); 433 err = spi_bitbang_start(&hw->bitbang);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index f103e470cb63..5559b2299198 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2184,6 +2184,12 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2184 goto err_clk_prep; 2184 goto err_clk_prep;
2185 } 2185 }
2186 2186
2187 status = clk_enable(pl022->clk);
2188 if (status) {
2189 dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
2190 goto err_no_clk_en;
2191 }
2192
2187 /* Disable SSP */ 2193 /* Disable SSP */
2188 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), 2194 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
2189 SSP_CR1(pl022->virtbase)); 2195 SSP_CR1(pl022->virtbase));
@@ -2237,6 +2243,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2237 2243
2238 free_irq(adev->irq[0], pl022); 2244 free_irq(adev->irq[0], pl022);
2239 err_no_irq: 2245 err_no_irq:
2246 clk_disable(pl022->clk);
2247 err_no_clk_en:
2240 clk_unprepare(pl022->clk); 2248 clk_unprepare(pl022->clk);
2241 err_clk_prep: 2249 err_clk_prep:
2242 clk_put(pl022->clk); 2250 clk_put(pl022->clk);
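pl022_probe() now enables the already-prepared bus clock, and the error path gains a matching clk_disable() between err_no_irq and the new err_no_clk_en label, keeping the unwind in strict reverse order of acquisition. A generic sketch of that goto-based unwind pattern with hypothetical resources:

#include <stdio.h>

static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; }   /* pretend this step fails */
static void release_a(void) { puts("release a"); }
static void release_b(void) { puts("release b"); }

static int probe(void)
{
        int err;

        err = acquire_a();
        if (err)
                goto err_a;
        err = acquire_b();              /* e.g. clk_prepare() + clk_enable() */
        if (err)
                goto err_b;
        err = acquire_c();
        if (err)
                goto err_c;             /* unwind everything acquired so far */
        return 0;

err_c:
        release_b();                    /* reverse order: newest first */
err_b:
        release_a();
err_a:
        return err;
}

int main(void)
{
        printf("probe() = %d\n", probe());
        return 0;
}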
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 84c934c0a545..520e8286db28 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -517,10 +517,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
517 517
518static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc) 518static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
519{ 519{
520 ssb_pcicore_fix_sprom_core_index(pc); 520 struct ssb_device *pdev = pc->dev;
521 struct ssb_bus *bus = pdev->bus;
522
523 if (bus->bustype == SSB_BUSTYPE_PCI)
524 ssb_pcicore_fix_sprom_core_index(pc);
521 525
522 /* Disable PCI interrupts. */ 526 /* Disable PCI interrupts. */
523 ssb_write32(pc->dev, SSB_INTVEC, 0); 527 ssb_write32(pdev, SSB_INTVEC, 0);
524 528
525 /* Additional PCIe always once-executed workarounds */ 529 /* Additional PCIe always once-executed workarounds */
526 if (pc->dev->id.coreid == SSB_DEV_PCIE) { 530 if (pc->dev->id.coreid == SSB_DEV_PCIE) {
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 21d8c1c16cd8..5e78c77d5a08 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -671,7 +671,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
671 } 671 }
672 672
673 insns = 673 insns =
674 kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL); 674 kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL);
675 if (!insns) { 675 if (!insns) {
676 DPRINTK("kmalloc failed\n"); 676 DPRINTK("kmalloc failed\n");
677 ret = -ENOMEM; 677 ret = -ENOMEM;
@@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
1432 return ret; 1432 return ret;
1433} 1433}
1434 1434
1435static void comedi_unmap(struct vm_area_struct *area) 1435
1436static void comedi_vm_open(struct vm_area_struct *area)
1437{
1438 struct comedi_async *async;
1439 struct comedi_device *dev;
1440
1441 async = area->vm_private_data;
1442 dev = async->subdevice->device;
1443
1444 mutex_lock(&dev->mutex);
1445 async->mmap_count++;
1446 mutex_unlock(&dev->mutex);
1447}
1448
1449static void comedi_vm_close(struct vm_area_struct *area)
1436{ 1450{
1437 struct comedi_async *async; 1451 struct comedi_async *async;
1438 struct comedi_device *dev; 1452 struct comedi_device *dev;
@@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area)
1446} 1460}
1447 1461
1448static struct vm_operations_struct comedi_vm_ops = { 1462static struct vm_operations_struct comedi_vm_ops = {
1449 .close = comedi_unmap, 1463 .open = comedi_vm_open,
1464 .close = comedi_vm_close,
1450}; 1465};
1451 1466
1452static int comedi_mmap(struct file *file, struct vm_area_struct *vma) 1467static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
1453{ 1468{
1454 const unsigned minor = iminor(file->f_dentry->d_inode); 1469 const unsigned minor = iminor(file->f_dentry->d_inode);
1455 struct comedi_device_file_info *dev_file_info =
1456 comedi_get_device_file_info(minor);
1457 struct comedi_device *dev = dev_file_info->device;
1458 struct comedi_async *async = NULL; 1470 struct comedi_async *async = NULL;
1459 unsigned long start = vma->vm_start; 1471 unsigned long start = vma->vm_start;
1460 unsigned long size; 1472 unsigned long size;
@@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
1462 int i; 1474 int i;
1463 int retval; 1475 int retval;
1464 struct comedi_subdevice *s; 1476 struct comedi_subdevice *s;
1477 struct comedi_device_file_info *dev_file_info;
1478 struct comedi_device *dev;
1479
1480 dev_file_info = comedi_get_device_file_info(minor);
1481 if (dev_file_info == NULL)
1482 return -ENODEV;
1483 dev = dev_file_info->device;
1484 if (dev == NULL)
1485 return -ENODEV;
1465 1486
1466 mutex_lock(&dev->mutex); 1487 mutex_lock(&dev->mutex);
1467 if (!dev->attached) { 1488 if (!dev->attached) {
@@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
1528{ 1549{
1529 unsigned int mask = 0; 1550 unsigned int mask = 0;
1530 const unsigned minor = iminor(file->f_dentry->d_inode); 1551 const unsigned minor = iminor(file->f_dentry->d_inode);
1531 struct comedi_device_file_info *dev_file_info =
1532 comedi_get_device_file_info(minor);
1533 struct comedi_device *dev = dev_file_info->device;
1534 struct comedi_subdevice *read_subdev; 1552 struct comedi_subdevice *read_subdev;
1535 struct comedi_subdevice *write_subdev; 1553 struct comedi_subdevice *write_subdev;
1554 struct comedi_device_file_info *dev_file_info;
1555 struct comedi_device *dev;
1556 dev_file_info = comedi_get_device_file_info(minor);
1557
1558 if (dev_file_info == NULL)
1559 return -ENODEV;
1560 dev = dev_file_info->device;
1561 if (dev == NULL)
1562 return -ENODEV;
1536 1563
1537 mutex_lock(&dev->mutex); 1564 mutex_lock(&dev->mutex);
1538 if (!dev->attached) { 1565 if (!dev->attached) {
@@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
1578 int n, m, count = 0, retval = 0; 1605 int n, m, count = 0, retval = 0;
1579 DECLARE_WAITQUEUE(wait, current); 1606 DECLARE_WAITQUEUE(wait, current);
1580 const unsigned minor = iminor(file->f_dentry->d_inode); 1607 const unsigned minor = iminor(file->f_dentry->d_inode);
1581 struct comedi_device_file_info *dev_file_info = 1608 struct comedi_device_file_info *dev_file_info;
1582 comedi_get_device_file_info(minor); 1609 struct comedi_device *dev;
1583 struct comedi_device *dev = dev_file_info->device; 1610 dev_file_info = comedi_get_device_file_info(minor);
1611
1612 if (dev_file_info == NULL)
1613 return -ENODEV;
1614 dev = dev_file_info->device;
1615 if (dev == NULL)
1616 return -ENODEV;
1584 1617
1585 if (!dev->attached) { 1618 if (!dev->attached) {
1586 DPRINTK("no driver configured on comedi%i\n", dev->minor); 1619 DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
1640 retval = -EAGAIN; 1673 retval = -EAGAIN;
1641 break; 1674 break;
1642 } 1675 }
1676 schedule();
1643 if (signal_pending(current)) { 1677 if (signal_pending(current)) {
1644 retval = -ERESTARTSYS; 1678 retval = -ERESTARTSYS;
1645 break; 1679 break;
1646 } 1680 }
1647 schedule();
1648 if (!s->busy) 1681 if (!s->busy)
1649 break; 1682 break;
1650 if (s->busy != file) { 1683 if (s->busy != file) {
@@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
1683 int n, m, count = 0, retval = 0; 1716 int n, m, count = 0, retval = 0;
1684 DECLARE_WAITQUEUE(wait, current); 1717 DECLARE_WAITQUEUE(wait, current);
1685 const unsigned minor = iminor(file->f_dentry->d_inode); 1718 const unsigned minor = iminor(file->f_dentry->d_inode);
1686 struct comedi_device_file_info *dev_file_info = 1719 struct comedi_device_file_info *dev_file_info;
1687 comedi_get_device_file_info(minor); 1720 struct comedi_device *dev;
1688 struct comedi_device *dev = dev_file_info->device; 1721 dev_file_info = comedi_get_device_file_info(minor);
1722
1723 if (dev_file_info == NULL)
1724 return -ENODEV;
1725 dev = dev_file_info->device;
1726 if (dev == NULL)
1727 return -ENODEV;
1689 1728
1690 if (!dev->attached) { 1729 if (!dev->attached) {
1691 DPRINTK("no driver configured on comedi%i\n", dev->minor); 1730 DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
1741 retval = -EAGAIN; 1780 retval = -EAGAIN;
1742 break; 1781 break;
1743 } 1782 }
1783 schedule();
1744 if (signal_pending(current)) { 1784 if (signal_pending(current)) {
1745 retval = -ERESTARTSYS; 1785 retval = -ERESTARTSYS;
1746 break; 1786 break;
1747 } 1787 }
1748 schedule();
1749 if (!s->busy) { 1788 if (!s->busy) {
1750 retval = 0; 1789 retval = 0;
1751 break; 1790 break;
@@ -1885,11 +1924,17 @@ ok:
1885static int comedi_close(struct inode *inode, struct file *file) 1924static int comedi_close(struct inode *inode, struct file *file)
1886{ 1925{
1887 const unsigned minor = iminor(inode); 1926 const unsigned minor = iminor(inode);
1888 struct comedi_device_file_info *dev_file_info =
1889 comedi_get_device_file_info(minor);
1890 struct comedi_device *dev = dev_file_info->device;
1891 struct comedi_subdevice *s = NULL; 1927 struct comedi_subdevice *s = NULL;
1892 int i; 1928 int i;
1929 struct comedi_device_file_info *dev_file_info;
1930 struct comedi_device *dev;
1931 dev_file_info = comedi_get_device_file_info(minor);
1932
1933 if (dev_file_info == NULL)
1934 return -ENODEV;
1935 dev = dev_file_info->device;
1936 if (dev == NULL)
1937 return -ENODEV;
1893 1938
1894 mutex_lock(&dev->mutex); 1939 mutex_lock(&dev->mutex);
1895 1940
@@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file)
1923static int comedi_fasync(int fd, struct file *file, int on) 1968static int comedi_fasync(int fd, struct file *file, int on)
1924{ 1969{
1925 const unsigned minor = iminor(file->f_dentry->d_inode); 1970 const unsigned minor = iminor(file->f_dentry->d_inode);
1926 struct comedi_device_file_info *dev_file_info = 1971 struct comedi_device_file_info *dev_file_info;
1927 comedi_get_device_file_info(minor); 1972 struct comedi_device *dev;
1973 dev_file_info = comedi_get_device_file_info(minor);
1928 1974
1929 struct comedi_device *dev = dev_file_info->device; 1975 if (dev_file_info == NULL)
1976 return -ENODEV;
1977 dev = dev_file_info->device;
1978 if (dev == NULL)
1979 return -ENODEV;
1930 1980
1931 return fasync_helper(fd, file, on, &dev->async_queue); 1981 return fasync_helper(fd, file, on, &dev->async_queue);
1932} 1982}
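The comedi_fops.c changes do three things: every file operation now checks that comedi_get_device_file_info() and its device pointer are non-NULL before use, the buffer mapping gains a comedi_vm_open() so mmap_count is also incremented when a VMA is duplicated (fork, mremap) and not only decremented on close, and the read/write wait loops call schedule() before testing signal_pending(), so the task sleeps or yields before deciding whether to bail out with -ERESTARTSYS. A small user-space model of the open/close counting (illustrative structures, not comedi's):

#include <stdio.h>

struct async_buf { int mmap_count; };

/* vm_operations-style callbacks: every duplicate of the mapping takes a ref. */
static void vm_open(struct async_buf *a)  { a->mmap_count++; }
static void vm_close(struct async_buf *a) { a->mmap_count--; }

int main(void)
{
        struct async_buf a = { .mmap_count = 1 };  /* initial mmap()            */

        vm_open(&a);   /* child VMA created by fork()                           */
        vm_close(&a);  /* child unmaps                                          */
        vm_close(&a);  /* parent unmaps: count reaches 0, buffer can be freed   */
        printf("mmap_count = %d\n", a.mmap_count);
        return 0;
}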
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index a8fea9a91733..6144afb8cbaa 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1,4 +1,4 @@
1#define DRIVER_VERSION "v0.5" 1#define DRIVER_VERSION "v0.6"
2#define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com" 2#define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
3#define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com" 3#define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"
4/* 4/*
@@ -25,7 +25,7 @@ Driver: usbduxsigma
25Description: University of Stirling USB DAQ & INCITE Technology Limited 25Description: University of Stirling USB DAQ & INCITE Technology Limited
26Devices: [ITL] USB-DUX (usbduxsigma.o) 26Devices: [ITL] USB-DUX (usbduxsigma.o)
27Author: Bernd Porr <BerndPorr@f2s.com> 27Author: Bernd Porr <BerndPorr@f2s.com>
28Updated: 21 Jul 2011 28Updated: 8 Nov 2011
29Status: testing 29Status: testing
30*/ 30*/
31/* 31/*
@@ -44,6 +44,7 @@ Status: testing
44 * 0.3: proper vendor ID and driver name 44 * 0.3: proper vendor ID and driver name
45 * 0.4: fixed D/A voltage range 45 * 0.4: fixed D/A voltage range
46 * 0.5: various bug fixes, health check at startup 46 * 0.5: various bug fixes, health check at startup
47 * 0.6: corrected wrong input range
47 */ 48 */
48 49
49/* generates loads of debug info */ 50/* generates loads of debug info */
@@ -175,7 +176,7 @@ Status: testing
175/* comedi constants */ 176/* comedi constants */
176static const struct comedi_lrange range_usbdux_ai_range = { 1, { 177static const struct comedi_lrange range_usbdux_ai_range = { 1, {
177 BIP_RANGE 178 BIP_RANGE
178 (2.65) 179 (2.65/2.0)
179 } 180 }
180}; 181};
181 182
diff --git a/drivers/staging/et131x/Kconfig b/drivers/staging/et131x/Kconfig
index 9e1864c6dfd0..8190f2aaf53b 100644
--- a/drivers/staging/et131x/Kconfig
+++ b/drivers/staging/et131x/Kconfig
@@ -1,6 +1,7 @@
1config ET131X 1config ET131X
2 tristate "Agere ET-1310 Gigabit Ethernet support" 2 tristate "Agere ET-1310 Gigabit Ethernet support"
3 depends on PCI 3 depends on PCI && NET && NETDEVICES
4 select PHYLIB
4 default n 5 default n
5 ---help--- 6 ---help---
6 This driver supports Agere ET-1310 ethernet adapters. 7 This driver supports Agere ET-1310 ethernet adapters.
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index f5f44a02456f..0c1c6ca8c379 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -4469,6 +4469,12 @@ static int et131x_resume(struct device *dev)
4469 return 0; 4469 return 0;
4470} 4470}
4471 4471
4472static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
4473#define ET131X_PM_OPS (&et131x_pm_ops)
4474#else
4475#define ET131X_PM_OPS NULL
4476#endif
4477
4472/* ISR functions */ 4478/* ISR functions */
4473 4479
4474/** 4480/**
@@ -5470,12 +5476,6 @@ err_out:
5470 return result; 5476 return result;
5471} 5477}
5472 5478
5473static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
5474#define ET131X_PM_OPS (&et131x_pm_ops)
5475#else
5476#define ET131X_PM_OPS NULL
5477#endif
5478
5479static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = { 5479static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
5480 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, 5480 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
5481 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, 5481 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
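The et131x.c hunks only move code: SIMPLE_DEV_PM_OPS() and the ET131X_PM_OPS macro now sit directly after the suspend/resume handlers, inside the same preprocessor guard, so the symbols are defined before anything references them and the #else/#endif close the conditional where it was opened. The usual shape of that idiom, sketched with a hypothetical foo_ driver and the commonly used CONFIG_PM_SLEEP guard:

        #ifdef CONFIG_PM_SLEEP
        static int foo_suspend(struct device *dev) { return 0; /* save state */ }
        static int foo_resume(struct device *dev)  { return 0; /* restore state */ }

        static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
        #define FOO_PM_OPS (&foo_pm_ops)
        #else
        #define FOO_PM_OPS NULL
        #endif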
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 326e967d54ef..aec9311b108c 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -242,19 +242,26 @@ static const struct file_operations iio_event_chrdev_fileops = {
242 242
243static int iio_event_getfd(struct iio_dev *indio_dev) 243static int iio_event_getfd(struct iio_dev *indio_dev)
244{ 244{
245 if (indio_dev->event_interface == NULL) 245 struct iio_event_interface *ev_int = indio_dev->event_interface;
246 int fd;
247
248 if (ev_int == NULL)
246 return -ENODEV; 249 return -ENODEV;
247 250
248 mutex_lock(&indio_dev->event_interface->event_list_lock); 251 mutex_lock(&ev_int->event_list_lock);
249 if (test_and_set_bit(IIO_BUSY_BIT_POS, 252 if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
250 &indio_dev->event_interface->flags)) { 253 mutex_unlock(&ev_int->event_list_lock);
251 mutex_unlock(&indio_dev->event_interface->event_list_lock);
252 return -EBUSY; 254 return -EBUSY;
253 } 255 }
254 mutex_unlock(&indio_dev->event_interface->event_list_lock); 256 mutex_unlock(&ev_int->event_list_lock);
255 return anon_inode_getfd("iio:event", 257 fd = anon_inode_getfd("iio:event",
256 &iio_event_chrdev_fileops, 258 &iio_event_chrdev_fileops, ev_int, O_RDONLY);
257 indio_dev->event_interface, O_RDONLY); 259 if (fd < 0) {
260 mutex_lock(&ev_int->event_list_lock);
261 clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
262 mutex_unlock(&ev_int->event_list_lock);
263 }
264 return fd;
258} 265}
259 266
260static int __init iio_init(void) 267static int __init iio_init(void)
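The industrialio-core.c hunk caches indio_dev->event_interface in a local and, more importantly, adds an unwind path: if anon_inode_getfd() fails, the IIO_BUSY_BIT_POS flag claimed a few lines earlier is cleared again, so a later open attempt is not permanently refused with -EBUSY. The claim-then-unwind shape, sketched with hypothetical example_* names:

        /* Hypothetical event interface; only the busy-flag handling matters here. */
        struct example_event_interface {
                const struct file_operations *fops;
                unsigned long flags;
        };
        #define EXAMPLE_BUSY_BIT 0

        static int example_event_getfd(struct example_event_interface *ev_int)
        {
                int fd;

                if (test_and_set_bit(EXAMPLE_BUSY_BIT, &ev_int->flags))
                        return -EBUSY;                  /* already claimed */

                fd = anon_inode_getfd("example:event", ev_int->fops,
                                      ev_int, O_RDONLY);
                if (fd < 0)
                        clear_bit(EXAMPLE_BUSY_BIT, &ev_int->flags); /* undo claim */
                return fd;
        }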
diff --git a/drivers/staging/media/as102/as102_drv.c b/drivers/staging/media/as102/as102_drv.c
index d335c7d6fa0f..828526d4c289 100644
--- a/drivers/staging/media/as102/as102_drv.c
+++ b/drivers/staging/media/as102/as102_drv.c
@@ -32,8 +32,8 @@
32#include "as102_fw.h" 32#include "as102_fw.h"
33#include "dvbdev.h" 33#include "dvbdev.h"
34 34
35int debug; 35int as102_debug;
36module_param_named(debug, debug, int, 0644); 36module_param_named(debug, as102_debug, int, 0644);
37MODULE_PARM_DESC(debug, "Turn on/off debugging (default: off)"); 37MODULE_PARM_DESC(debug, "Turn on/off debugging (default: off)");
38 38
39int dual_tuner; 39int dual_tuner;
diff --git a/drivers/staging/media/as102/as102_drv.h b/drivers/staging/media/as102/as102_drv.h
index bcda635b5a99..fd33f5a12dcc 100644
--- a/drivers/staging/media/as102/as102_drv.h
+++ b/drivers/staging/media/as102/as102_drv.h
@@ -37,7 +37,8 @@ extern struct spi_driver as102_spi_driver;
37#define DRIVER_FULL_NAME "Abilis Systems as10x usb driver" 37#define DRIVER_FULL_NAME "Abilis Systems as10x usb driver"
38#define DRIVER_NAME "as10x_usb" 38#define DRIVER_NAME "as10x_usb"
39 39
40extern int debug; 40extern int as102_debug;
41#define debug as102_debug
41 42
42#define dprintk(debug, args...) \ 43#define dprintk(debug, args...) \
43 do { if (debug) { \ 44 do { if (debug) { \
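The as102 hunks rename the global so the driver no longer exports a symbol as generic as "debug" (which collides when several drivers using that name are built in), while module_param_named() keeps the user-visible parameter name unchanged and a #define keeps existing dprintk() users compiling. The same pattern with a hypothetical mydrv_ prefix:

        /* mydrv.c: the module parameter is still called "debug" on the command line. */
        int mydrv_debug;
        module_param_named(debug, mydrv_debug, int, 0644);
        MODULE_PARM_DESC(debug, "Turn on/off debugging (default: off)");

        /* mydrv.h: internal users keep writing "debug". */
        extern int mydrv_debug;
        #define debug mydrv_debug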
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index b445cd63f901..2542c3743904 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -275,7 +275,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
275 CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64; 275 CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
276 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 276 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
277 struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i; 277 struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
278 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset)); 278 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + fs->page_offset));
279 hw_buffer.s.size = fs->size; 279 hw_buffer.s.size = fs->size;
280 CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64; 280 CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
281 } 281 }
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index fb2e89c3056c..5385da2e9cdb 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
89 {USB_DEVICE(0x0DF6, 0x0045)}, 89 {USB_DEVICE(0x0DF6, 0x0045)},
90 {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */ 90 {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
91 {USB_DEVICE(0x0DF6, 0x004B)}, 91 {USB_DEVICE(0x0DF6, 0x004B)},
92 {USB_DEVICE(0x0DF6, 0x005D)},
92 {USB_DEVICE(0x0DF6, 0x0063)}, 93 {USB_DEVICE(0x0DF6, 0x0063)},
93 /* Sweex */ 94 /* Sweex */
94 {USB_DEVICE(0x177F, 0x0154)}, 95 {USB_DEVICE(0x177F, 0x0154)},
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 480b0ed2e4de..115635f95024 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -1021,6 +1021,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci,
1021 th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan"); 1021 th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
1022 if (IS_ERR(th)) { 1022 if (IS_ERR(th)) {
1023 printk(KERN_ERR "Unable to start the device-scanning thread\n"); 1023 printk(KERN_ERR "Unable to start the device-scanning thread\n");
1024 complete(&dev->scanning_done);
1024 quiesce_and_remove_host(dev); 1025 quiesce_and_remove_host(dev);
1025 err = PTR_ERR(th); 1026 err = PTR_ERR(th);
1026 goto errout; 1027 goto errout;
diff --git a/drivers/staging/slicoss/Kconfig b/drivers/staging/slicoss/Kconfig
index 5cde96b2e6e1..5c2a15b42dfe 100644
--- a/drivers/staging/slicoss/Kconfig
+++ b/drivers/staging/slicoss/Kconfig
@@ -1,6 +1,6 @@
1config SLICOSS 1config SLICOSS
2 tristate "Alacritech Gigabit IS-NIC support" 2 tristate "Alacritech Gigabit IS-NIC support"
3 depends on PCI && X86 3 depends on PCI && X86 && NET
4 default n 4 default n
5 help 5 help
6 This driver supports Alacritech's IS-NIC gigabit ethernet cards. 6 This driver supports Alacritech's IS-NIC gigabit ethernet cards.
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 3d1279c424a8..7eb56178fb64 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -54,6 +54,7 @@
54 54
55/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */ 55/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
56#define DMT_ID(id) ((id) + 4) 56#define DMT_ID(id) ((id) + 4)
57#define DM_TIMER_CLOCKS 4
57 58
58/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */ 59/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
59#define MCBSP_ID(id) ((id) - 6) 60#define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@ static s8 get_clk_type(u8 id)
114 */ 115 */
115void dsp_clk_exit(void) 116void dsp_clk_exit(void)
116{ 117{
118 int i;
119
117 dsp_clock_disable_all(dsp_clocks); 120 dsp_clock_disable_all(dsp_clocks);
118 121
122 for (i = 0; i < DM_TIMER_CLOCKS; i++)
123 omap_dm_timer_free(timer[i]);
124
119 clk_put(iva2_clk); 125 clk_put(iva2_clk);
120 clk_put(ssi.sst_fck); 126 clk_put(ssi.sst_fck);
121 clk_put(ssi.ssr_fck); 127 clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@ void dsp_clk_exit(void)
130void dsp_clk_init(void) 136void dsp_clk_init(void)
131{ 137{
132 static struct platform_device dspbridge_device; 138 static struct platform_device dspbridge_device;
139 int i, id;
133 140
134 dspbridge_device.dev.bus = &platform_bus_type; 141 dspbridge_device.dev.bus = &platform_bus_type;
135 142
143 for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
144 timer[i] = omap_dm_timer_request_specific(id);
145
136 iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck"); 146 iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
137 if (IS_ERR(iva2_clk)) 147 if (IS_ERR(iva2_clk))
138 dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk); 148 dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
204 clk_enable(iva2_clk); 214 clk_enable(iva2_clk);
205 break; 215 break;
206 case GPT_CLK: 216 case GPT_CLK:
207 timer[clk_id - 1] = 217 status = omap_dm_timer_start(timer[clk_id - 1]);
208 omap_dm_timer_request_specific(DMT_ID(clk_id));
209 break; 218 break;
210#ifdef CONFIG_OMAP_MCBSP 219#ifdef CONFIG_OMAP_MCBSP
211 case MCBSP_CLK: 220 case MCBSP_CLK:
@@ -281,7 +290,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
281 clk_disable(iva2_clk); 290 clk_disable(iva2_clk);
282 break; 291 break;
283 case GPT_CLK: 292 case GPT_CLK:
284 omap_dm_timer_free(timer[clk_id - 1]); 293 status = omap_dm_timer_stop(timer[clk_id - 1]);
285 break; 294 break;
286#ifdef CONFIG_OMAP_MCBSP 295#ifdef CONFIG_OMAP_MCBSP
287 case MCBSP_CLK: 296 case MCBSP_CLK:
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index c43c7e3421c8..76cfc6edecd9 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -24,11 +24,7 @@
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/pm.h> 26#include <linux/pm.h>
27
28#ifdef MODULE
29#include <linux/module.h> 27#include <linux/module.h>
30#endif
31
32#include <linux/device.h> 28#include <linux/device.h>
33#include <linux/init.h> 29#include <linux/init.h>
34#include <linux/moduleparam.h> 30#include <linux/moduleparam.h>
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 09c44abb89e8..3872b8cccdcf 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
68{ 68{
69 struct usbip_device *ud = &vdev->ud; 69 struct usbip_device *ud = &vdev->ud;
70 struct urb *urb; 70 struct urb *urb;
71 unsigned long flags;
71 72
72 spin_lock(&vdev->priv_lock); 73 spin_lock(&vdev->priv_lock);
73 urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum); 74 urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
@@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
101 102
102 usbip_dbg_vhci_rx("now giveback urb %p\n", urb); 103 usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
103 104
104 spin_lock(&the_controller->lock); 105 spin_lock_irqsave(&the_controller->lock, flags);
105 usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); 106 usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
106 spin_unlock(&the_controller->lock); 107 spin_unlock_irqrestore(&the_controller->lock, flags);
107 108
108 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status); 109 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
109 110
@@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
141{ 142{
142 struct vhci_unlink *unlink; 143 struct vhci_unlink *unlink;
143 struct urb *urb; 144 struct urb *urb;
145 unsigned long flags;
144 146
145 usbip_dump_header(pdu); 147 usbip_dump_header(pdu);
146 148
@@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
170 urb->status = pdu->u.ret_unlink.status; 172 urb->status = pdu->u.ret_unlink.status;
171 pr_info("urb->status %d\n", urb->status); 173 pr_info("urb->status %d\n", urb->status);
172 174
173 spin_lock(&the_controller->lock); 175 spin_lock_irqsave(&the_controller->lock, flags);
174 usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); 176 usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
175 spin_unlock(&the_controller->lock); 177 spin_unlock_irqrestore(&the_controller->lock, flags);
176 178
177 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, 179 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
178 urb->status); 180 urb->status);
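Both vhci_rx.c hunks switch the_controller->lock from spin_lock() to spin_lock_irqsave(): the receive path runs in thread context, but the lock may also be taken with interrupts disabled, so interrupts are masked while it is held and the saved flags restore the caller's state afterwards. A sketch of the pattern with a hypothetical giveback helper:

        /* Hypothetical giveback path; the point is the irqsave/irqrestore pair. */
        static void example_giveback(struct usb_hcd *hcd, spinlock_t *lock,
                                     struct urb *urb)
        {
                unsigned long flags;

                spin_lock_irqsave(lock, flags);     /* lock shared with IRQ-side code */
                usb_hcd_unlink_urb_from_ep(hcd, urb);
                spin_unlock_irqrestore(lock, flags);

                usb_hcd_giveback_urb(hcd, urb, urb->status);
        }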
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 0fd96c10271d..8599545cdf9e 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -614,13 +614,12 @@ int iscsit_add_reject(
614 hdr = (struct iscsi_reject *) cmd->pdu; 614 hdr = (struct iscsi_reject *) cmd->pdu;
615 hdr->reason = reason; 615 hdr->reason = reason;
616 616
617 cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); 617 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
618 if (!cmd->buf_ptr) { 618 if (!cmd->buf_ptr) {
619 pr_err("Unable to allocate memory for cmd->buf_ptr\n"); 619 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
620 iscsit_release_cmd(cmd); 620 iscsit_release_cmd(cmd);
621 return -1; 621 return -1;
622 } 622 }
623 memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
624 623
625 spin_lock_bh(&conn->cmd_lock); 624 spin_lock_bh(&conn->cmd_lock);
626 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 625 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
661 hdr = (struct iscsi_reject *) cmd->pdu; 660 hdr = (struct iscsi_reject *) cmd->pdu;
662 hdr->reason = reason; 661 hdr->reason = reason;
663 662
664 cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); 663 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
665 if (!cmd->buf_ptr) { 664 if (!cmd->buf_ptr) {
666 pr_err("Unable to allocate memory for cmd->buf_ptr\n"); 665 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
667 iscsit_release_cmd(cmd); 666 iscsit_release_cmd(cmd);
668 return -1; 667 return -1;
669 } 668 }
670 memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
671 669
672 if (add_to_conn) { 670 if (add_to_conn) {
673 spin_lock_bh(&conn->cmd_lock); 671 spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1015,6 @@ done:
1017 " non-existent or non-exported iSCSI LUN:" 1015 " non-existent or non-exported iSCSI LUN:"
1018 " 0x%016Lx\n", get_unaligned_le64(&hdr->lun)); 1016 " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
1019 } 1017 }
1020 if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
1021 return iscsit_add_reject_from_cmd(
1022 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1023 1, 1, buf, cmd);
1024
1025 send_check_condition = 1; 1018 send_check_condition = 1;
1026 goto attach_cmd; 1019 goto attach_cmd;
1027 } 1020 }
@@ -1044,6 +1037,8 @@ done:
1044 */ 1037 */
1045 send_check_condition = 1; 1038 send_check_condition = 1;
1046 } else { 1039 } else {
1040 cmd->data_length = cmd->se_cmd.data_length;
1041
1047 if (iscsit_decide_list_to_build(cmd, payload_length) < 0) 1042 if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
1048 return iscsit_add_reject_from_cmd( 1043 return iscsit_add_reject_from_cmd(
1049 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1044 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1118,7 @@ attach_cmd:
1123 * the backend memory allocation. 1118 * the backend memory allocation.
1124 */ 1119 */
1125 ret = transport_generic_new_cmd(&cmd->se_cmd); 1120 ret = transport_generic_new_cmd(&cmd->se_cmd);
1126 if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) { 1121 if (ret < 0) {
1127 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1122 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1128 dump_immediate_data = 1; 1123 dump_immediate_data = 1;
1129 goto after_immediate_data; 1124 goto after_immediate_data;
@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1341 1336
1342 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 1337 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1343 if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) || 1338 if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
1344 (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED)) 1339 (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
1345 dump_unsolicited_data = 1; 1340 dump_unsolicited_data = 1;
1346 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 1341 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1347 1342
@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
2513 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { 2508 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
2514 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 2509 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
2515 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW; 2510 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
2516 hdr->residual_count = cpu_to_be32(cmd->residual_count); 2511 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2517 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 2512 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
2518 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW; 2513 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
2519 hdr->residual_count = cpu_to_be32(cmd->residual_count); 2514 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2520 } 2515 }
2521 } 2516 }
2522 hton24(hdr->dlength, datain.length); 2517 hton24(hdr->dlength, datain.length);
@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
3018 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3013 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3019 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 3014 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
3020 hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW; 3015 hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
3021 hdr->residual_count = cpu_to_be32(cmd->residual_count); 3016 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3022 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 3017 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
3023 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW; 3018 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
3024 hdr->residual_count = cpu_to_be32(cmd->residual_count); 3019 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3025 } 3020 }
3026 hdr->response = cmd->iscsi_response; 3021 hdr->response = cmd->iscsi_response;
3027 hdr->cmd_status = cmd->se_cmd.scsi_status; 3022 hdr->cmd_status = cmd->se_cmd.scsi_status;
@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
3133 hdr = (struct iscsi_tm_rsp *) cmd->pdu; 3128 hdr = (struct iscsi_tm_rsp *) cmd->pdu;
3134 memset(hdr, 0, ISCSI_HDR_LEN); 3129 memset(hdr, 0, ISCSI_HDR_LEN);
3135 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 3130 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
3131 hdr->flags = ISCSI_FLAG_CMD_FINAL;
3136 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); 3132 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
3137 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3133 hdr->itt = cpu_to_be32(cmd->init_task_tag);
3138 cmd->stat_sn = conn->stat_sn++; 3134 cmd->stat_sn = conn->stat_sn++;
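Two recurring changes run through the iscsi_target.c hunks: residual counts are read from cmd->se_cmd.residual_count instead of a duplicated field in struct iscsi_cmd, and kzalloc()-plus-memcpy() pairs of the same length collapse into kmemdup(), which allocates and copies in one call and fails the same way. The kmemdup() shape, wrapped in a hypothetical helper:

        /* Duplicate a fixed-size PDU buffer; kmemdup() replaces kzalloc()+memcpy(). */
        static int copy_pdu(void **out, const void *src, size_t len)
        {
                *out = kmemdup(src, len, GFP_KERNEL);
                return *out ? 0 : -ENOMEM;
        }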
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index beb39469e7f1..1cd6ce373b83 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -30,9 +30,11 @@
30 30
31static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len) 31static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
32{ 32{
33 int j = DIV_ROUND_UP(len, 2); 33 int j = DIV_ROUND_UP(len, 2), rc;
34 34
35 hex2bin(dst, src, j); 35 rc = hex2bin(dst, src, j);
36 if (rc < 0)
37 pr_debug("CHAP string contains non hex digit symbols\n");
36 38
37 dst[j] = '\0'; 39 dst[j] = '\0';
38 return j; 40 return j;
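The chap_string_to_hex() hunk reflects hex2bin() now returning an error instead of silently accepting non-hex characters; the driver only logs the failure here, but a caller can propagate it. A hypothetical caller that does:

        /* Convert an ASCII hex string; returns byte count or -EINVAL. */
        static int parse_hex_key(u8 *dst, const char *src, int ascii_len)
        {
                int bytes = DIV_ROUND_UP(ascii_len, 2);

                if (hex2bin(dst, src, bytes) < 0)
                        return -EINVAL;         /* src held a non-hex digit */
                return bytes;
        }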
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 3723d90d5ae5..f1a02dad05a0 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -398,7 +398,6 @@ struct iscsi_cmd {
398 u32 pdu_send_order; 398 u32 pdu_send_order;
399 /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */ 399 /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
400 u32 pdu_start; 400 u32 pdu_start;
401 u32 residual_count;
402 /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */ 401 /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
403 u32 seq_send_order; 402 u32 seq_send_order;
404 /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */ 403 /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
535 atomic_t connection_exit; 534 atomic_t connection_exit;
536 atomic_t connection_recovery; 535 atomic_t connection_recovery;
537 atomic_t connection_reinstatement; 536 atomic_t connection_reinstatement;
538 atomic_t connection_wait;
539 atomic_t connection_wait_rcfr; 537 atomic_t connection_wait_rcfr;
540 atomic_t sleep_on_conn_wait_comp; 538 atomic_t sleep_on_conn_wait_comp;
541 atomic_t transport_failed; 539 atomic_t transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
643 atomic_t session_reinstatement; 641 atomic_t session_reinstatement;
644 atomic_t session_stop_active; 642 atomic_t session_stop_active;
645 atomic_t sleep_on_sess_wait_comp; 643 atomic_t sleep_on_sess_wait_comp;
646 atomic_t transport_wait_cmds;
647 /* connection list */ 644 /* connection list */
648 struct list_head sess_conn_list; 645 struct list_head sess_conn_list;
649 struct list_head cr_active_list; 646 struct list_head cr_active_list;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index c4c68da3e500..101b1beb3bca 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
938 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well. 938 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
939 */ 939 */
940 if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) { 940 if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
941 if (se_cmd->se_cmd_flags & 941 if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
942 SCF_SCSI_RESERVATION_CONFLICT) {
943 cmd->i_state = ISTATE_SEND_STATUS; 942 cmd->i_state = ISTATE_SEND_STATUS;
944 spin_unlock_bh(&cmd->istate_lock); 943 spin_unlock_bh(&cmd->istate_lock);
945 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, 944 iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index daad362a93ce..d734bdec24f9 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
224 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 224 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
225 ISCSI_LOGIN_STATUS_NO_RESOURCES); 225 ISCSI_LOGIN_STATUS_NO_RESOURCES);
226 pr_err("Could not allocate memory for session\n"); 226 pr_err("Could not allocate memory for session\n");
227 return -1; 227 return -ENOMEM;
228 } 228 }
229 229
230 iscsi_login_set_conn_values(sess, conn, pdu->cid); 230 iscsi_login_set_conn_values(sess, conn, pdu->cid);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
250 pr_err("idr_pre_get() for sess_idr failed\n"); 250 pr_err("idr_pre_get() for sess_idr failed\n");
251 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 251 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
252 ISCSI_LOGIN_STATUS_NO_RESOURCES); 252 ISCSI_LOGIN_STATUS_NO_RESOURCES);
253 return -1; 253 kfree(sess);
254 return -ENOMEM;
254 } 255 }
255 spin_lock(&sess_idr_lock); 256 spin_lock(&sess_idr_lock);
256 idr_get_new(&sess_idr, NULL, &sess->session_index); 257 idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
270 ISCSI_LOGIN_STATUS_NO_RESOURCES); 271 ISCSI_LOGIN_STATUS_NO_RESOURCES);
271 pr_err("Unable to allocate memory for" 272 pr_err("Unable to allocate memory for"
272 " struct iscsi_sess_ops.\n"); 273 " struct iscsi_sess_ops.\n");
273 return -1; 274 kfree(sess);
275 return -ENOMEM;
274 } 276 }
275 277
276 sess->se_sess = transport_init_session(); 278 sess->se_sess = transport_init_session();
277 if (!sess->se_sess) { 279 if (IS_ERR(sess->se_sess)) {
278 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 280 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
279 ISCSI_LOGIN_STATUS_NO_RESOURCES); 281 ISCSI_LOGIN_STATUS_NO_RESOURCES);
280 return -1; 282 kfree(sess);
283 return -ENOMEM;
281 } 284 }
282 285
283 return 0; 286 return 0;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 426cd4bf6a9a..98936cb7c294 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
981 return NULL; 981 return NULL;
982 } 982 }
983 983
984 login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); 984 login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
985 if (!login->req) { 985 if (!login->req) {
986 pr_err("Unable to allocate memory for Login Request.\n"); 986 pr_err("Unable to allocate memory for Login Request.\n");
987 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 987 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
988 ISCSI_LOGIN_STATUS_NO_RESOURCES); 988 ISCSI_LOGIN_STATUS_NO_RESOURCES);
989 goto out; 989 goto out;
990 } 990 }
991 memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
992 991
993 login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL); 992 login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
994 if (!login->req_buf) { 993 if (!login->req_buf) {
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 3df1c9b8ae6b..81d5832fbbd5 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
113 scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr, 113 scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
114 &tl_cmd->tl_sense_buf[0]); 114 &tl_cmd->tl_sense_buf[0]);
115 115
116 /*
117 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
118 */
119 if (scsi_bidi_cmnd(sc)) 116 if (scsi_bidi_cmnd(sc))
120 se_cmd->t_tasks_bidi = 1; 117 se_cmd->se_cmd_flags |= SCF_BIDI;
118
121 /* 119 /*
122 * Locate the struct se_lun pointer and attach it to struct se_cmd 120 * Locate the struct se_lun pointer and attach it to struct se_cmd
123 */ 121 */
@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
148 * Allocate the necessary tasks to complete the received CDB+data 146 * Allocate the necessary tasks to complete the received CDB+data
149 */ 147 */
150 ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd); 148 ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
151 if (ret == -ENOMEM) { 149 if (ret != 0)
152 /* Out of Resources */ 150 return ret;
153 return PYX_TRANSPORT_LU_COMM_FAILURE;
154 } else if (ret == -EINVAL) {
155 /*
156 * Handle case for SAM_STAT_RESERVATION_CONFLICT
157 */
158 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
159 return PYX_TRANSPORT_RESERVATION_CONFLICT;
160 /*
161 * Otherwise, return SAM_STAT_CHECK_CONDITION and return
162 * sense data.
163 */
164 return PYX_TRANSPORT_USE_SENSE_REASON;
165 }
166
167 /* 151 /*
168 * For BIDI commands, pass in the extra READ buffer 152 * For BIDI commands, pass in the extra READ buffer
169 * to transport_generic_map_mem_to_cmd() below.. 153 * to transport_generic_map_mem_to_cmd() below..
170 */ 154 */
171 if (se_cmd->t_tasks_bidi) { 155 if (se_cmd->se_cmd_flags & SCF_BIDI) {
172 struct scsi_data_buffer *sdb = scsi_in(sc); 156 struct scsi_data_buffer *sdb = scsi_in(sc);
173 157
174 sgl_bidi = sdb->table.sgl; 158 sgl_bidi = sdb->table.sgl;
@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
194 } 178 }
195 179
196 /* Tell the core about our preallocated memory */ 180 /* Tell the core about our preallocated memory */
197 ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), 181 return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
198 scsi_sg_count(sc), sgl_bidi, sgl_bidi_count); 182 scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
199 if (ret < 0)
200 return PYX_TRANSPORT_LU_COMM_FAILURE;
201
202 return 0;
203} 183}
204 184
205/* 185/*
@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
1360{ 1340{
1361 struct tcm_loop_hba *tl_hba = container_of(wwn, 1341 struct tcm_loop_hba *tl_hba = container_of(wwn,
1362 struct tcm_loop_hba, tl_hba_wwn); 1342 struct tcm_loop_hba, tl_hba_wwn);
1363 int host_no = tl_hba->sh->host_no; 1343
1344 pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
1345 " SAS Address: %s at Linux/SCSI Host ID: %d\n",
1346 tl_hba->tl_wwn_address, tl_hba->sh->host_no);
1364 /* 1347 /*
1365 * Call device_unregister() on the original tl_hba->dev. 1348 * Call device_unregister() on the original tl_hba->dev.
1366 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will 1349 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
1367 * release *tl_hba; 1350 * release *tl_hba;
1368 */ 1351 */
1369 device_unregister(&tl_hba->dev); 1352 device_unregister(&tl_hba->dev);
1370
1371 pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
1372 " SAS Address: %s at Linux/SCSI Host ID: %d\n",
1373 config_item_name(&wwn->wwn_group.cg_item), host_no);
1374} 1353}
1375 1354
1376/* Start items for tcm_loop_cit */ 1355/* Start items for tcm_loop_cit */
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 88f2ad43ec8b..1dcbef499d6a 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
191 int alua_access_state, primary = 0, rc; 191 int alua_access_state, primary = 0, rc;
192 u16 tg_pt_id, rtpi; 192 u16 tg_pt_id, rtpi;
193 193
194 if (!l_port) 194 if (!l_port) {
195 return PYX_TRANSPORT_LU_COMM_FAILURE; 195 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
196 196 return -EINVAL;
197 }
197 buf = transport_kmap_first_data_page(cmd); 198 buf = transport_kmap_first_data_page(cmd);
198 199
199 /* 200 /*
@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
203 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; 204 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
204 if (!l_tg_pt_gp_mem) { 205 if (!l_tg_pt_gp_mem) {
205 pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); 206 pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
206 rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 207 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
208 rc = -EINVAL;
207 goto out; 209 goto out;
208 } 210 }
209 spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 211 spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
211 if (!l_tg_pt_gp) { 213 if (!l_tg_pt_gp) {
212 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 214 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
213 pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); 215 pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
214 rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 216 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
217 rc = -EINVAL;
215 goto out; 218 goto out;
216 } 219 }
217 rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA); 220 rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
220 if (!rc) { 223 if (!rc) {
221 pr_debug("Unable to process SET_TARGET_PORT_GROUPS" 224 pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
222 " while TPGS_EXPLICT_ALUA is disabled\n"); 225 " while TPGS_EXPLICT_ALUA is disabled\n");
223 rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 226 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
227 rc = -EINVAL;
224 goto out; 228 goto out;
225 } 229 }
226 230
@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
245 * REQUEST, and the additional sense code set to INVALID 249 * REQUEST, and the additional sense code set to INVALID
246 * FIELD IN PARAMETER LIST. 250 * FIELD IN PARAMETER LIST.
247 */ 251 */
248 rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 252 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
253 rc = -EINVAL;
249 goto out; 254 goto out;
250 } 255 }
251 rc = -1; 256 rc = -1;
@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
298 * throw an exception with ASCQ: INVALID_PARAMETER_LIST 303 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
299 */ 304 */
300 if (rc != 0) { 305 if (rc != 0) {
301 rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 306 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
307 rc = -EINVAL;
302 goto out; 308 goto out;
303 } 309 }
304 } else { 310 } else {
@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
335 * INVALID_PARAMETER_LIST 341 * INVALID_PARAMETER_LIST
336 */ 342 */
337 if (rc != 0) { 343 if (rc != 0) {
338 rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 344 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
345 rc = -EINVAL;
339 goto out; 346 goto out;
340 } 347 }
341 } 348 }
@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1184 * struct t10_alua_lu_gp. 1191 * struct t10_alua_lu_gp.
1185 */ 1192 */
1186 spin_lock(&lu_gps_lock); 1193 spin_lock(&lu_gps_lock);
1187 atomic_set(&lu_gp->lu_gp_shutdown, 1);
1188 list_del(&lu_gp->lu_gp_node); 1194 list_del(&lu_gp->lu_gp_node);
1189 alua_lu_gps_count--; 1195 alua_lu_gps_count--;
1190 spin_unlock(&lu_gps_lock); 1196 spin_unlock(&lu_gps_lock);
@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1438 1444
1439 tg_pt_gp_mem->tg_pt = port; 1445 tg_pt_gp_mem->tg_pt = port;
1440 port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem; 1446 port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1441 atomic_set(&port->sep_tg_pt_gp_active, 1);
1442 1447
1443 return tg_pt_gp_mem; 1448 return tg_pt_gp_mem;
1444} 1449}
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 683ba02b8247..831468b3163d 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
478 if (cmd->data_length < 60) 478 if (cmd->data_length < 60)
479 return 0; 479 return 0;
480 480
481 buf[2] = 0x3c; 481 buf[3] = 0x3c;
482 /* Set HEADSUP, ORDSUP, SIMPSUP */ 482 /* Set HEADSUP, ORDSUP, SIMPSUP */
483 buf[5] = 0x07; 483 buf[5] = 0x07;
484 484
@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
703 if (cmd->data_length < 4) { 703 if (cmd->data_length < 4) {
704 pr_err("SCSI Inquiry payload length: %u" 704 pr_err("SCSI Inquiry payload length: %u"
705 " too small for EVPD=1\n", cmd->data_length); 705 " too small for EVPD=1\n", cmd->data_length);
706 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
706 return -EINVAL; 707 return -EINVAL;
707 } 708 }
708 709
@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
719 } 720 }
720 721
721 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); 722 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
723 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
722 ret = -EINVAL; 724 ret = -EINVAL;
723 725
724out_unmap: 726out_unmap:
@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
969 default: 971 default:
970 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", 972 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
971 cdb[2] & 0x3f, cdb[3]); 973 cdb[2] & 0x3f, cdb[3]);
972 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; 974 cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
975 return -EINVAL;
973 } 976 }
974 offset += length; 977 offset += length;
975 978
@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
1027 if (cdb[1] & 0x01) { 1030 if (cdb[1] & 0x01) {
1028 pr_err("REQUEST_SENSE description emulation not" 1031 pr_err("REQUEST_SENSE description emulation not"
1029 " supported\n"); 1032 " supported\n");
1030 return PYX_TRANSPORT_INVALID_CDB_FIELD; 1033 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1034 return -ENOSYS;
1031 } 1035 }
1032 1036
1033 buf = transport_kmap_first_data_page(cmd); 1037 buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
1100 if (!dev->transport->do_discard) { 1104 if (!dev->transport->do_discard) {
1101 pr_err("UNMAP emulation not supported for: %s\n", 1105 pr_err("UNMAP emulation not supported for: %s\n",
1102 dev->transport->name); 1106 dev->transport->name);
1103 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1107 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1108 return -ENOSYS;
1104 } 1109 }
1105 1110
1106 /* First UNMAP block descriptor starts at 8 byte offset */ 1111 /* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
1157 if (!dev->transport->do_discard) { 1162 if (!dev->transport->do_discard) {
1158 pr_err("WRITE_SAME emulation not supported" 1163 pr_err("WRITE_SAME emulation not supported"
1159 " for: %s\n", dev->transport->name); 1164 " for: %s\n", dev->transport->name);
1160 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1165 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1166 return -ENOSYS;
1161 } 1167 }
1162 1168
1163 if (cmd->t_task_cdb[0] == WRITE_SAME) 1169 if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
1193int target_emulate_synchronize_cache(struct se_task *task) 1199int target_emulate_synchronize_cache(struct se_task *task)
1194{ 1200{
1195 struct se_device *dev = task->task_se_cmd->se_dev; 1201 struct se_device *dev = task->task_se_cmd->se_dev;
1202 struct se_cmd *cmd = task->task_se_cmd;
1196 1203
1197 if (!dev->transport->do_sync_cache) { 1204 if (!dev->transport->do_sync_cache) {
1198 pr_err("SYNCHRONIZE_CACHE emulation not supported" 1205 pr_err("SYNCHRONIZE_CACHE emulation not supported"
1199 " for: %s\n", dev->transport->name); 1206 " for: %s\n", dev->transport->name);
1200 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1207 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1208 return -ENOSYS;
1201 } 1209 }
1202 1210
1203 dev->transport->do_sync_cache(task); 1211 dev->transport->do_sync_cache(task);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e0c1e8a8dd4e..93d4f6a1b798 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
67static struct config_group alua_group; 67static struct config_group alua_group;
68static struct config_group alua_lu_gps_group; 68static struct config_group alua_lu_gps_group;
69 69
70static DEFINE_SPINLOCK(se_device_lock);
71static LIST_HEAD(se_dev_list);
72
73static inline struct se_hba * 70static inline struct se_hba *
74item_to_hba(struct config_item *item) 71item_to_hba(struct config_item *item)
75{ 72{
@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
2741 " struct se_subsystem_dev\n"); 2738 " struct se_subsystem_dev\n");
2742 goto unlock; 2739 goto unlock;
2743 } 2740 }
2744 INIT_LIST_HEAD(&se_dev->se_dev_node);
2745 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); 2741 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
2746 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); 2742 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
2747 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); 2743 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
2777 " from allocate_virtdevice()\n"); 2773 " from allocate_virtdevice()\n");
2778 goto out; 2774 goto out;
2779 } 2775 }
2780 spin_lock(&se_device_lock);
2781 list_add_tail(&se_dev->se_dev_node, &se_dev_list);
2782 spin_unlock(&se_device_lock);
2783 2776
2784 config_group_init_type_name(&se_dev->se_dev_group, name, 2777 config_group_init_type_name(&se_dev->se_dev_group, name,
2785 &target_core_dev_cit); 2778 &target_core_dev_cit);
@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
2874 mutex_lock(&hba->hba_access_mutex); 2867 mutex_lock(&hba->hba_access_mutex);
2875 t = hba->transport; 2868 t = hba->transport;
2876 2869
2877 spin_lock(&se_device_lock);
2878 list_del(&se_dev->se_dev_node);
2879 spin_unlock(&se_device_lock);
2880
2881 dev_stat_grp = &se_dev->dev_stat_grps.stat_group; 2870 dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
2882 for (i = 0; dev_stat_grp->default_groups[i]; i++) { 2871 for (i = 0; dev_stat_grp->default_groups[i]; i++) {
2883 df_item = &dev_stat_grp->default_groups[i]->cg_item; 2872 df_item = &dev_stat_grp->default_groups[i]->cg_item;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ba5edec2c5f8..9b8639425472 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
104 se_cmd->se_lun = deve->se_lun; 104 se_cmd->se_lun = deve->se_lun;
105 se_cmd->pr_res_key = deve->pr_res_key; 105 se_cmd->pr_res_key = deve->pr_res_key;
106 se_cmd->orig_fe_lun = unpacked_lun; 106 se_cmd->orig_fe_lun = unpacked_lun;
107 se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
108 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 107 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
109 } 108 }
110 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 109 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
137 se_lun = &se_sess->se_tpg->tpg_virt_lun0; 136 se_lun = &se_sess->se_tpg->tpg_virt_lun0;
138 se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; 137 se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
139 se_cmd->orig_fe_lun = 0; 138 se_cmd->orig_fe_lun = 0;
140 se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
141 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 139 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
142 } 140 }
143 /* 141 /*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
200 se_lun = deve->se_lun; 198 se_lun = deve->se_lun;
201 se_cmd->pr_res_key = deve->pr_res_key; 199 se_cmd->pr_res_key = deve->pr_res_key;
202 se_cmd->orig_fe_lun = unpacked_lun; 200 se_cmd->orig_fe_lun = unpacked_lun;
203 se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
204 } 201 }
205 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 202 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
206 203
@@ -708,7 +705,7 @@ done:
708 705
709 se_task->task_scsi_status = GOOD; 706 se_task->task_scsi_status = GOOD;
710 transport_complete_task(se_task, 1); 707 transport_complete_task(se_task, 1);
711 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 708 return 0;
712} 709}
713 710
714/* se_release_device_for_hba(): 711/* se_release_device_for_hba():
@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
957 return -EINVAL; 954 return -EINVAL;
958 } 955 }
959 956
960 pr_err("dpo_emulated not supported\n"); 957 if (flag) {
961 return -EINVAL; 958 pr_err("dpo_emulated not supported\n");
959 return -EINVAL;
960 }
961
962 return 0;
962} 963}
963 964
964int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) 965int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
968 return -EINVAL; 969 return -EINVAL;
969 } 970 }
970 971
971 if (dev->transport->fua_write_emulated == 0) { 972 if (flag && dev->transport->fua_write_emulated == 0) {
972 pr_err("fua_write_emulated not supported\n"); 973 pr_err("fua_write_emulated not supported\n");
973 return -EINVAL; 974 return -EINVAL;
974 } 975 }
@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
985 return -EINVAL; 986 return -EINVAL;
986 } 987 }
987 988
988 pr_err("ua read emulated not supported\n"); 989 if (flag) {
989 return -EINVAL; 990 pr_err("ua read emulated not supported\n");
991 return -EINVAL;
992 }
993
994 return 0;
990} 995}
991 996
992int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) 997int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
995 pr_err("Illegal value %d\n", flag); 1000 pr_err("Illegal value %d\n", flag);
996 return -EINVAL; 1001 return -EINVAL;
997 } 1002 }
998 if (dev->transport->write_cache_emulated == 0) { 1003 if (flag && dev->transport->write_cache_emulated == 0) {
999 pr_err("write_cache_emulated not supported\n"); 1004 pr_err("write_cache_emulated not supported\n");
1000 return -EINVAL; 1005 return -EINVAL;
1001 } 1006 }
@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1056 * We expect this value to be non-zero when generic Block Layer 1061 * We expect this value to be non-zero when generic Block Layer
1057 * Discard supported is detected iblock_create_virtdevice(). 1062 * Discard supported is detected iblock_create_virtdevice().
1058 */ 1063 */
1059 if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 1064 if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1060 pr_err("Generic Block Discard not supported\n"); 1065 pr_err("Generic Block Discard not supported\n");
1061 return -ENOSYS; 1066 return -ENOSYS;
1062 } 1067 }
@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1077 * We expect this value to be non-zero when generic Block Layer 1082 * We expect this value to be non-zero when generic Block Layer
1078 * Discard supported is detected iblock_create_virtdevice(). 1083 * Discard supported is detected iblock_create_virtdevice().
1079 */ 1084 */
1080 if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 1085 if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1081 pr_err("Generic Block Discard not supported\n"); 1086 pr_err("Generic Block Discard not supported\n");
1082 return -ENOSYS; 1087 return -ENOSYS;
1083 } 1088 }
@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
1587 ret = -ENOMEM; 1592 ret = -ENOMEM;
1588 goto out; 1593 goto out;
1589 } 1594 }
1590 INIT_LIST_HEAD(&se_dev->se_dev_node);
1591 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); 1595 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
1592 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); 1596 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
1593 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); 1597 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 67cd6fe05bfa..b4864fba4ef0 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
289 return -ENOMEM; 289 return -ENOMEM;
290 } 290 }
291 291
292 for (i = 0; i < task->task_sg_nents; i++) { 292 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
293 iov[i].iov_len = sg[i].length; 293 iov[i].iov_len = sg->length;
294 iov[i].iov_base = sg_virt(&sg[i]); 294 iov[i].iov_base = sg_virt(sg);
295 } 295 }
296 296
297 old_fs = get_fs(); 297 old_fs = get_fs();
@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
342 return -ENOMEM; 342 return -ENOMEM;
343 } 343 }
344 344
345 for (i = 0; i < task->task_sg_nents; i++) { 345 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
346 iov[i].iov_len = sg[i].length; 346 iov[i].iov_len = sg->length;
347 iov[i].iov_base = sg_virt(&sg[i]); 347 iov[i].iov_base = sg_virt(sg);
348 } 348 }
349 349
350 old_fs = get_fs(); 350 old_fs = get_fs();
@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
438 if (ret > 0 && 438 if (ret > 0 &&
439 dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && 439 dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
440 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 440 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
441 cmd->t_tasks_fua) { 441 (cmd->se_cmd_flags & SCF_FUA)) {
442 /* 442 /*
443 * We might need to be a bit smarter here 443 * We might need to be a bit smarter here
444 * and return some sense data to let the initiator 444 * and return some sense data to let the initiator
@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
449 449
450 } 450 }
451 451
452 if (ret < 0) 452 if (ret < 0) {
453 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
453 return ret; 454 return ret;
455 }
454 if (ret) { 456 if (ret) {
455 task->task_scsi_status = GOOD; 457 task->task_scsi_status = GOOD;
456 transport_complete_task(task, 1); 458 transport_complete_task(task, 1);
457 } 459 }
458 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 460 return 0;
459} 461}
460 462
461/* fd_free_task(): (Part of se_subsystem_api_t template) 463/* fd_free_task(): (Part of se_subsystem_api_t template)
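The fd_do_readv()/fd_do_writev() hunks stop indexing the scatterlist as a flat array and use for_each_sg() instead, which follows chained scatterlists via sg_next() rather than assuming the entries are contiguous in memory. The iteration pattern on its own, as a hypothetical helper:

        /* Fill an iovec array from a (possibly chained) scatterlist. */
        static void fill_iov_from_sgl(struct iovec *iov, struct scatterlist *sgl,
                                      unsigned int nents)
        {
                struct scatterlist *sg;
                unsigned int i;

                for_each_sg(sgl, sg, nents, i) {
                        iov[i].iov_len  = sg->length;
                        iov[i].iov_base = sg_virt(sg);
                }
        }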
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7698efe29262..4aa992204438 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
531 */ 531 */
532 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || 532 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
533 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 533 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
534 task->task_se_cmd->t_tasks_fua)) 534 (cmd->se_cmd_flags & SCF_FUA)))
535 rw = WRITE_FUA; 535 rw = WRITE_FUA;
536 else 536 else
537 rw = WRITE; 537 rw = WRITE;
@@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
554 else { 554 else {
555 pr_err("Unsupported SCSI -> BLOCK LBA conversion:" 555 pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
556 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); 556 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
557 return PYX_TRANSPORT_LU_COMM_FAILURE; 557 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
558 return -ENOSYS;
558 } 559 }
559 560
560 bio = iblock_get_bio(task, block_lba, sg_num); 561 bio = iblock_get_bio(task, block_lba, sg_num);
561 if (!bio) 562 if (!bio) {
562 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 563 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
564 return -ENOMEM;
565 }
563 566
564 bio_list_init(&list); 567 bio_list_init(&list);
565 bio_list_add(&list, bio); 568 bio_list_add(&list, bio);
@@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
588 submit_bio(rw, bio); 591 submit_bio(rw, bio);
589 blk_finish_plug(&plug); 592 blk_finish_plug(&plug);
590 593
591 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 594 return 0;
592 595
593fail: 596fail:
594 while ((bio = bio_list_pop(&list))) 597 while ((bio = bio_list_pop(&list)))
595 bio_put(bio); 598 bio_put(bio);
596 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 599 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
600 return -ENOMEM;
597} 601}
598 602
599static u32 iblock_get_device_rev(struct se_device *dev) 603static u32 iblock_get_device_rev(struct se_device *dev)
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5a4ebfc3a54f..95dee7074aeb 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
191 pr_err("Received legacy SPC-2 RESERVE/RELEASE" 191 pr_err("Received legacy SPC-2 RESERVE/RELEASE"
192 " while active SPC-3 registrations exist," 192 " while active SPC-3 registrations exist,"
193 " returning RESERVATION_CONFLICT\n"); 193 " returning RESERVATION_CONFLICT\n");
194 *ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 194 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
195 return true; 195 return true;
196 } 196 }
197 197
@@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
252 (cmd->t_task_cdb[1] & 0x02)) { 252 (cmd->t_task_cdb[1] & 0x02)) {
253 pr_err("LongIO and Obselete Bits set, returning" 253 pr_err("LongIO and Obselete Bits set, returning"
254 " ILLEGAL_REQUEST\n"); 254 " ILLEGAL_REQUEST\n");
255 ret = PYX_TRANSPORT_ILLEGAL_REQUEST; 255 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
256 ret = -EINVAL;
256 goto out; 257 goto out;
257 } 258 }
258 /* 259 /*
@@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
277 " from %s \n", cmd->se_lun->unpacked_lun, 278 " from %s \n", cmd->se_lun->unpacked_lun,
278 cmd->se_deve->mapped_lun, 279 cmd->se_deve->mapped_lun,
279 sess->se_node_acl->initiatorname); 280 sess->se_node_acl->initiatorname);
280 ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 281 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
282 ret = -EINVAL;
281 goto out_unlock; 283 goto out_unlock;
282 } 284 }
283 285
@@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
1510 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); 1512 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
1511 if (!tidh_new) { 1513 if (!tidh_new) {
1512 pr_err("Unable to allocate tidh_new\n"); 1514 pr_err("Unable to allocate tidh_new\n");
1513 return PYX_TRANSPORT_LU_COMM_FAILURE; 1515 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1516 return -EINVAL;
1514 } 1517 }
1515 INIT_LIST_HEAD(&tidh_new->dest_list); 1518 INIT_LIST_HEAD(&tidh_new->dest_list);
1516 tidh_new->dest_tpg = tpg; 1519 tidh_new->dest_tpg = tpg;
@@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
1522 sa_res_key, all_tg_pt, aptpl); 1525 sa_res_key, all_tg_pt, aptpl);
1523 if (!local_pr_reg) { 1526 if (!local_pr_reg) {
1524 kfree(tidh_new); 1527 kfree(tidh_new);
1525 return PYX_TRANSPORT_LU_COMM_FAILURE; 1528 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1529 return -ENOMEM;
1526 } 1530 }
1527 tidh_new->dest_pr_reg = local_pr_reg; 1531 tidh_new->dest_pr_reg = local_pr_reg;
1528 /* 1532 /*
@@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
1548 pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header" 1552 pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
1549 " does not equal CDB data_length: %u\n", tpdl, 1553 " does not equal CDB data_length: %u\n", tpdl,
1550 cmd->data_length); 1554 cmd->data_length);
1551 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 1555 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1556 ret = -EINVAL;
1552 goto out; 1557 goto out;
1553 } 1558 }
1554 /* 1559 /*
@@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
1598 " for tmp_tpg\n"); 1603 " for tmp_tpg\n");
1599 atomic_dec(&tmp_tpg->tpg_pr_ref_count); 1604 atomic_dec(&tmp_tpg->tpg_pr_ref_count);
1600 smp_mb__after_atomic_dec(); 1605 smp_mb__after_atomic_dec();
1601 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 1606 cmd->scsi_sense_reason =
1607 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1608 ret = -EINVAL;
1602 goto out; 1609 goto out;
1603 } 1610 }
1604 /* 1611 /*
@@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
1628 atomic_dec(&dest_node_acl->acl_pr_ref_count); 1635 atomic_dec(&dest_node_acl->acl_pr_ref_count);
1629 smp_mb__after_atomic_dec(); 1636 smp_mb__after_atomic_dec();
1630 core_scsi3_tpg_undepend_item(tmp_tpg); 1637 core_scsi3_tpg_undepend_item(tmp_tpg);
1631 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 1638 cmd->scsi_sense_reason =
1639 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1640 ret = -EINVAL;
1632 goto out; 1641 goto out;
1633 } 1642 }
1634 1643
@@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
1646 if (!dest_tpg) { 1655 if (!dest_tpg) {
1647 pr_err("SPC-3 PR SPEC_I_PT: Unable to locate" 1656 pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
1648 " dest_tpg\n"); 1657 " dest_tpg\n");
1649 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 1658 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1659 ret = -EINVAL;
1650 goto out; 1660 goto out;
1651 } 1661 }
1652#if 0 1662#if 0
@@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
1660 " %u for Transport ID: %s\n", tid_len, ptr); 1670 " %u for Transport ID: %s\n", tid_len, ptr);
1661 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1671 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1662 core_scsi3_tpg_undepend_item(dest_tpg); 1672 core_scsi3_tpg_undepend_item(dest_tpg);
1663 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 1673 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1674 ret = -EINVAL;
1664 goto out; 1675 goto out;
1665 } 1676 }
1666 /* 1677 /*
@@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
1678 1689
1679 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1690 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1680 core_scsi3_tpg_undepend_item(dest_tpg); 1691 core_scsi3_tpg_undepend_item(dest_tpg);
1681 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 1692 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1693 ret = -EINVAL;
1682 goto out; 1694 goto out;
1683 } 1695 }
1684 1696
@@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
1690 smp_mb__after_atomic_dec(); 1702 smp_mb__after_atomic_dec();
1691 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1703 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1692 core_scsi3_tpg_undepend_item(dest_tpg); 1704 core_scsi3_tpg_undepend_item(dest_tpg);
1693 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 1705 cmd->scsi_sense_reason =
1706 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1707 ret = -EINVAL;
1694 goto out; 1708 goto out;
1695 } 1709 }
1696#if 0 1710#if 0
@@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
1727 core_scsi3_lunacl_undepend_item(dest_se_deve); 1741 core_scsi3_lunacl_undepend_item(dest_se_deve);
1728 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1742 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1729 core_scsi3_tpg_undepend_item(dest_tpg); 1743 core_scsi3_tpg_undepend_item(dest_tpg);
1730 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 1744 cmd->scsi_sense_reason =
1745 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1746 ret = -ENOMEM;
1731 goto out; 1747 goto out;
1732 } 1748 }
1733 INIT_LIST_HEAD(&tidh_new->dest_list); 1749 INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
1759 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1775 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1760 core_scsi3_tpg_undepend_item(dest_tpg); 1776 core_scsi3_tpg_undepend_item(dest_tpg);
1761 kfree(tidh_new); 1777 kfree(tidh_new);
1762 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 1778 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1779 ret = -EINVAL;
1763 goto out; 1780 goto out;
1764 } 1781 }
1765 tidh_new->dest_pr_reg = dest_pr_reg; 1782 tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
2098 2115
2099 if (!se_sess || !se_lun) { 2116 if (!se_sess || !se_lun) {
2100 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2117 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2101 return PYX_TRANSPORT_LU_COMM_FAILURE; 2118 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2119 return -EINVAL;
2102 } 2120 }
2103 se_tpg = se_sess->se_tpg; 2121 se_tpg = se_sess->se_tpg;
2104 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 2122 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
2117 if (res_key) { 2135 if (res_key) {
2118 pr_warn("SPC-3 PR: Reservation Key non-zero" 2136 pr_warn("SPC-3 PR: Reservation Key non-zero"
2119 " for SA REGISTER, returning CONFLICT\n"); 2137 " for SA REGISTER, returning CONFLICT\n");
2120 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2138 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2139 return -EINVAL;
2121 } 2140 }
2122 /* 2141 /*
2123 * Do nothing but return GOOD status. 2142 * Do nothing but return GOOD status.
2124 */ 2143 */
2125 if (!sa_res_key) 2144 if (!sa_res_key)
2126 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 2145 return 0;
2127 2146
2128 if (!spec_i_pt) { 2147 if (!spec_i_pt) {
2129 /* 2148 /*
@@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
2138 if (ret != 0) { 2157 if (ret != 0) {
2139 pr_err("Unable to allocate" 2158 pr_err("Unable to allocate"
2140 " struct t10_pr_registration\n"); 2159 " struct t10_pr_registration\n");
2141 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2160 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2161 return -EINVAL;
2142 } 2162 }
2143 } else { 2163 } else {
2144 /* 2164 /*
@@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
2197 " 0x%016Lx\n", res_key, 2217 " 0x%016Lx\n", res_key,
2198 pr_reg->pr_res_key); 2218 pr_reg->pr_res_key);
2199 core_scsi3_put_pr_reg(pr_reg); 2219 core_scsi3_put_pr_reg(pr_reg);
2200 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2220 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2221 return -EINVAL;
2201 } 2222 }
2202 } 2223 }
2203 if (spec_i_pt) { 2224 if (spec_i_pt) {
2204 pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT" 2225 pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
2205 " set while sa_res_key=0\n"); 2226 " set while sa_res_key=0\n");
2206 core_scsi3_put_pr_reg(pr_reg); 2227 core_scsi3_put_pr_reg(pr_reg);
2207 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2228 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2229 return -EINVAL;
2208 } 2230 }
2209 /* 2231 /*
2210 * An existing ALL_TG_PT=1 registration being released 2232 * An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
2215 " registration exists, but ALL_TG_PT=1 bit not" 2237 " registration exists, but ALL_TG_PT=1 bit not"
2216 " present in received PROUT\n"); 2238 " present in received PROUT\n");
2217 core_scsi3_put_pr_reg(pr_reg); 2239 core_scsi3_put_pr_reg(pr_reg);
2218 return PYX_TRANSPORT_INVALID_CDB_FIELD; 2240 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2241 return -EINVAL;
2219 } 2242 }
2220 /* 2243 /*
2221 * Allocate APTPL metadata buffer used for UNREGISTER ops 2244 * Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
2227 pr_err("Unable to allocate" 2250 pr_err("Unable to allocate"
2228 " pr_aptpl_buf\n"); 2251 " pr_aptpl_buf\n");
2229 core_scsi3_put_pr_reg(pr_reg); 2252 core_scsi3_put_pr_reg(pr_reg);
2230 return PYX_TRANSPORT_LU_COMM_FAILURE; 2253 cmd->scsi_sense_reason =
2254 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2255 return -EINVAL;
2231 } 2256 }
2232 } 2257 }
2233 /* 2258 /*
@@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
2241 if (pr_holder < 0) { 2266 if (pr_holder < 0) {
2242 kfree(pr_aptpl_buf); 2267 kfree(pr_aptpl_buf);
2243 core_scsi3_put_pr_reg(pr_reg); 2268 core_scsi3_put_pr_reg(pr_reg);
2244 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2269 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2270 return -EINVAL;
2245 } 2271 }
2246 2272
2247 spin_lock(&pr_tmpl->registration_lock); 2273 spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
2405 2431
2406 if (!se_sess || !se_lun) { 2432 if (!se_sess || !se_lun) {
2407 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2433 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2408 return PYX_TRANSPORT_LU_COMM_FAILURE; 2434 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2435 return -EINVAL;
2409 } 2436 }
2410 se_tpg = se_sess->se_tpg; 2437 se_tpg = se_sess->se_tpg;
2411 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 2438 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
2417 if (!pr_reg) { 2444 if (!pr_reg) {
2418 pr_err("SPC-3 PR: Unable to locate" 2445 pr_err("SPC-3 PR: Unable to locate"
2419 " PR_REGISTERED *pr_reg for RESERVE\n"); 2446 " PR_REGISTERED *pr_reg for RESERVE\n");
2420 return PYX_TRANSPORT_LU_COMM_FAILURE; 2447 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2448 return -EINVAL;
2421 } 2449 }
2422 /* 2450 /*
2423 * From spc4r17 Section 5.7.9: Reserving: 2451 * From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
2433 " does not match existing SA REGISTER res_key:" 2461 " does not match existing SA REGISTER res_key:"
2434 " 0x%016Lx\n", res_key, pr_reg->pr_res_key); 2462 " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
2435 core_scsi3_put_pr_reg(pr_reg); 2463 core_scsi3_put_pr_reg(pr_reg);
2436 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2464 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2465 return -EINVAL;
2437 } 2466 }
2438 /* 2467 /*
2439 * From spc4r17 Section 5.7.9: Reserving: 2468 * From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
2448 if (scope != PR_SCOPE_LU_SCOPE) { 2477 if (scope != PR_SCOPE_LU_SCOPE) {
2449 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 2478 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
2450 core_scsi3_put_pr_reg(pr_reg); 2479 core_scsi3_put_pr_reg(pr_reg);
2451 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 2480 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2481 return -EINVAL;
2452 } 2482 }
2453 /* 2483 /*
2454 * See if we have an existing PR reservation holder pointer at 2484 * See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
2480 2510
2481 spin_unlock(&dev->dev_reservation_lock); 2511 spin_unlock(&dev->dev_reservation_lock);
2482 core_scsi3_put_pr_reg(pr_reg); 2512 core_scsi3_put_pr_reg(pr_reg);
2483 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2513 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2514 return -EINVAL;
2484 } 2515 }
2485 /* 2516 /*
2486 * From spc4r17 Section 5.7.9: Reserving: 2517 * From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
2503 2534
2504 spin_unlock(&dev->dev_reservation_lock); 2535 spin_unlock(&dev->dev_reservation_lock);
2505 core_scsi3_put_pr_reg(pr_reg); 2536 core_scsi3_put_pr_reg(pr_reg);
2506 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2537 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2538 return -EINVAL;
2507 } 2539 }
2508 /* 2540 /*
2509 * From spc4r17 Section 5.7.9: Reserving: 2541 * From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
2517 */ 2549 */
2518 spin_unlock(&dev->dev_reservation_lock); 2550 spin_unlock(&dev->dev_reservation_lock);
2519 core_scsi3_put_pr_reg(pr_reg); 2551 core_scsi3_put_pr_reg(pr_reg);
2520 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 2552 return 0;
2521 } 2553 }
2522 /* 2554 /*
2523 * Otherwise, our *pr_reg becomes the PR reservation holder for said 2555 * Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
2574 default: 2606 default:
2575 pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:" 2607 pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
2576 " 0x%02x\n", type); 2608 " 0x%02x\n", type);
2577 return PYX_TRANSPORT_INVALID_CDB_FIELD; 2609 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2610 return -EINVAL;
2578 } 2611 }
2579 2612
2580 return ret; 2613 return ret;
@@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
2630 2663
2631 if (!se_sess || !se_lun) { 2664 if (!se_sess || !se_lun) {
2632 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2665 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2633 return PYX_TRANSPORT_LU_COMM_FAILURE; 2666 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2667 return -EINVAL;
2634 } 2668 }
2635 /* 2669 /*
2636 * Locate the existing *pr_reg via struct se_node_acl pointers 2670 * Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
2639 if (!pr_reg) { 2673 if (!pr_reg) {
2640 pr_err("SPC-3 PR: Unable to locate" 2674 pr_err("SPC-3 PR: Unable to locate"
2641 " PR_REGISTERED *pr_reg for RELEASE\n"); 2675 " PR_REGISTERED *pr_reg for RELEASE\n");
2642 return PYX_TRANSPORT_LU_COMM_FAILURE; 2676 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2677 return -EINVAL;
2643 } 2678 }
2644 /* 2679 /*
2645 * From spc4r17 Section 5.7.11.2 Releasing: 2680 * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
2661 */ 2696 */
2662 spin_unlock(&dev->dev_reservation_lock); 2697 spin_unlock(&dev->dev_reservation_lock);
2663 core_scsi3_put_pr_reg(pr_reg); 2698 core_scsi3_put_pr_reg(pr_reg);
2664 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 2699 return 0;
2665 } 2700 }
2666 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || 2701 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
2667 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) 2702 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
2675 */ 2710 */
2676 spin_unlock(&dev->dev_reservation_lock); 2711 spin_unlock(&dev->dev_reservation_lock);
2677 core_scsi3_put_pr_reg(pr_reg); 2712 core_scsi3_put_pr_reg(pr_reg);
2678 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 2713 return 0;
2679 } 2714 }
2680 /* 2715 /*
2681 * From spc4r17 Section 5.7.11.2 Releasing: 2716 * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
2697 " 0x%016Lx\n", res_key, pr_reg->pr_res_key); 2732 " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
2698 spin_unlock(&dev->dev_reservation_lock); 2733 spin_unlock(&dev->dev_reservation_lock);
2699 core_scsi3_put_pr_reg(pr_reg); 2734 core_scsi3_put_pr_reg(pr_reg);
2700 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2735 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2736 return -EINVAL;
2701 } 2737 }
2702 /* 2738 /*
2703 * From spc4r17 Section 5.7.11.2 Releasing and above: 2739 * From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
2719 2755
2720 spin_unlock(&dev->dev_reservation_lock); 2756 spin_unlock(&dev->dev_reservation_lock);
2721 core_scsi3_put_pr_reg(pr_reg); 2757 core_scsi3_put_pr_reg(pr_reg);
2722 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2758 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2759 return -EINVAL;
2723 } 2760 }
2724 /* 2761 /*
2725 * In response to a persistent reservation release request from the 2762 * In response to a persistent reservation release request from the
@@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
2802 if (!pr_reg_n) { 2839 if (!pr_reg_n) {
2803 pr_err("SPC-3 PR: Unable to locate" 2840 pr_err("SPC-3 PR: Unable to locate"
2804 " PR_REGISTERED *pr_reg for CLEAR\n"); 2841 " PR_REGISTERED *pr_reg for CLEAR\n");
2805 return PYX_TRANSPORT_LU_COMM_FAILURE; 2842 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2843 return -EINVAL;
2806 } 2844 }
2807 /* 2845 /*
2808 * From spc4r17 section 5.7.11.6, Clearing: 2846 * From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
2821 " existing SA REGISTER res_key:" 2859 " existing SA REGISTER res_key:"
2822 " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); 2860 " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
2823 core_scsi3_put_pr_reg(pr_reg_n); 2861 core_scsi3_put_pr_reg(pr_reg_n);
2824 return PYX_TRANSPORT_RESERVATION_CONFLICT; 2862 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2863 return -EINVAL;
2825 } 2864 }
2826 /* 2865 /*
2827 * a) Release the persistent reservation, if any; 2866 * a) Release the persistent reservation, if any;
@@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
2979 int all_reg = 0, calling_it_nexus = 0, released_regs = 0; 3018 int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
2980 int prh_type = 0, prh_scope = 0, ret; 3019 int prh_type = 0, prh_scope = 0, ret;
2981 3020
2982 if (!se_sess) 3021 if (!se_sess) {
2983 return PYX_TRANSPORT_LU_COMM_FAILURE; 3022 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3023 return -EINVAL;
3024 }
2984 3025
2985 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 3026 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
2986 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 3027 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
2989 pr_err("SPC-3 PR: Unable to locate" 3030 pr_err("SPC-3 PR: Unable to locate"
2990 " PR_REGISTERED *pr_reg for PREEMPT%s\n", 3031 " PR_REGISTERED *pr_reg for PREEMPT%s\n",
2991 (abort) ? "_AND_ABORT" : ""); 3032 (abort) ? "_AND_ABORT" : "");
2992 return PYX_TRANSPORT_RESERVATION_CONFLICT; 3033 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3034 return -EINVAL;
2993 } 3035 }
2994 if (pr_reg_n->pr_res_key != res_key) { 3036 if (pr_reg_n->pr_res_key != res_key) {
2995 core_scsi3_put_pr_reg(pr_reg_n); 3037 core_scsi3_put_pr_reg(pr_reg_n);
2996 return PYX_TRANSPORT_RESERVATION_CONFLICT; 3038 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3039 return -EINVAL;
2997 } 3040 }
2998 if (scope != PR_SCOPE_LU_SCOPE) { 3041 if (scope != PR_SCOPE_LU_SCOPE) {
2999 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 3042 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
3000 core_scsi3_put_pr_reg(pr_reg_n); 3043 core_scsi3_put_pr_reg(pr_reg_n);
3001 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3044 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3045 return -EINVAL;
3002 } 3046 }
3003 INIT_LIST_HEAD(&preempt_and_abort_list); 3047 INIT_LIST_HEAD(&preempt_and_abort_list);
3004 3048
@@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
3012 if (!all_reg && !sa_res_key) { 3056 if (!all_reg && !sa_res_key) {
3013 spin_unlock(&dev->dev_reservation_lock); 3057 spin_unlock(&dev->dev_reservation_lock);
3014 core_scsi3_put_pr_reg(pr_reg_n); 3058 core_scsi3_put_pr_reg(pr_reg_n);
3015 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3059 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3060 return -EINVAL;
3016 } 3061 }
3017 /* 3062 /*
3018 * From spc4r17, section 5.7.11.4.4 Removing Registrations: 3063 * From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
3106 if (!released_regs) { 3151 if (!released_regs) {
3107 spin_unlock(&dev->dev_reservation_lock); 3152 spin_unlock(&dev->dev_reservation_lock);
3108 core_scsi3_put_pr_reg(pr_reg_n); 3153 core_scsi3_put_pr_reg(pr_reg_n);
3109 return PYX_TRANSPORT_RESERVATION_CONFLICT; 3154 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3155 return -EINVAL;
3110 } 3156 }
3111 /* 3157 /*
3112 * For an existing all registrants type reservation 3158 * For an existing all registrants type reservation
@@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
3297 default: 3343 default:
3298 pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s" 3344 pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
3299 " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type); 3345 " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
3300 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3346 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3347 return -EINVAL;
3301 } 3348 }
3302 3349
3303 return ret; 3350 return ret;
@@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3331 3378
3332 if (!se_sess || !se_lun) { 3379 if (!se_sess || !se_lun) {
3333 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 3380 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
3334 return PYX_TRANSPORT_LU_COMM_FAILURE; 3381 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3382 return -EINVAL;
3335 } 3383 }
3336 memset(dest_iport, 0, 64); 3384 memset(dest_iport, 0, 64);
3337 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 3385 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3349 if (!pr_reg) { 3397 if (!pr_reg) {
3350 pr_err("SPC-3 PR: Unable to locate PR_REGISTERED" 3398 pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
3351 " *pr_reg for REGISTER_AND_MOVE\n"); 3399 " *pr_reg for REGISTER_AND_MOVE\n");
3352 return PYX_TRANSPORT_LU_COMM_FAILURE; 3400 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3401 return -EINVAL;
3353 } 3402 }
3354 /* 3403 /*
3355 * The provided reservation key much match the existing reservation key 3404 * The provided reservation key much match the existing reservation key
@@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3360 " res_key: 0x%016Lx does not match existing SA REGISTER" 3409 " res_key: 0x%016Lx does not match existing SA REGISTER"
3361 " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); 3410 " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
3362 core_scsi3_put_pr_reg(pr_reg); 3411 core_scsi3_put_pr_reg(pr_reg);
3363 return PYX_TRANSPORT_RESERVATION_CONFLICT; 3412 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3413 return -EINVAL;
3364 } 3414 }
3365 /* 3415 /*
3366 * The service active reservation key needs to be non zero 3416 * The service active reservation key needs to be non zero
@@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3369 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero" 3419 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
3370 " sa_res_key\n"); 3420 " sa_res_key\n");
3371 core_scsi3_put_pr_reg(pr_reg); 3421 core_scsi3_put_pr_reg(pr_reg);
3372 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3422 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3423 return -EINVAL;
3373 } 3424 }
3374 3425
3375 /* 3426 /*
@@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3392 " does not equal CDB data_length: %u\n", tid_len, 3443 " does not equal CDB data_length: %u\n", tid_len,
3393 cmd->data_length); 3444 cmd->data_length);
3394 core_scsi3_put_pr_reg(pr_reg); 3445 core_scsi3_put_pr_reg(pr_reg);
3395 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3446 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3447 return -EINVAL;
3396 } 3448 }
3397 3449
3398 spin_lock(&dev->se_port_lock); 3450 spin_lock(&dev->se_port_lock);
@@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3417 atomic_dec(&dest_se_tpg->tpg_pr_ref_count); 3469 atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
3418 smp_mb__after_atomic_dec(); 3470 smp_mb__after_atomic_dec();
3419 core_scsi3_put_pr_reg(pr_reg); 3471 core_scsi3_put_pr_reg(pr_reg);
3420 return PYX_TRANSPORT_LU_COMM_FAILURE; 3472 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3473 return -EINVAL;
3421 } 3474 }
3422 3475
3423 spin_lock(&dev->se_port_lock); 3476 spin_lock(&dev->se_port_lock);
@@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3430 " fabric ops from Relative Target Port Identifier:" 3483 " fabric ops from Relative Target Port Identifier:"
3431 " %hu\n", rtpi); 3484 " %hu\n", rtpi);
3432 core_scsi3_put_pr_reg(pr_reg); 3485 core_scsi3_put_pr_reg(pr_reg);
3433 return PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3486 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3487 return -EINVAL;
3434 } 3488 }
3435 3489
3436 buf = transport_kmap_first_data_page(cmd); 3490 buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
3445 " from fabric: %s\n", proto_ident, 3499 " from fabric: %s\n", proto_ident,
3446 dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), 3500 dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
3447 dest_tf_ops->get_fabric_name()); 3501 dest_tf_ops->get_fabric_name());
3448 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3502 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3503 ret = -EINVAL;
3449 goto out; 3504 goto out;
3450 } 3505 }
3451 if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { 3506 if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
3452 pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not" 3507 pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
3453 " containg a valid tpg_parse_pr_out_transport_id" 3508 " containg a valid tpg_parse_pr_out_transport_id"
3454 " function pointer\n"); 3509 " function pointer\n");
3455 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 3510 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3511 ret = -EINVAL;
3456 goto out; 3512 goto out;
3457 } 3513 }
3458 initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, 3514 initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3460 if (!initiator_str) { 3516 if (!initiator_str) {
3461 pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" 3517 pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
3462 " initiator_str from Transport ID\n"); 3518 " initiator_str from Transport ID\n");
3463 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3519 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3520 ret = -EINVAL;
3464 goto out; 3521 goto out;
3465 } 3522 }
3466 3523
@@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3489 pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" 3546 pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
3490 " matches: %s on received I_T Nexus\n", initiator_str, 3547 " matches: %s on received I_T Nexus\n", initiator_str,
3491 pr_reg_nacl->initiatorname); 3548 pr_reg_nacl->initiatorname);
3492 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3549 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3550 ret = -EINVAL;
3493 goto out; 3551 goto out;
3494 } 3552 }
3495 if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) { 3553 if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
3497 " matches: %s %s on received I_T Nexus\n", 3555 " matches: %s %s on received I_T Nexus\n",
3498 initiator_str, iport_ptr, pr_reg_nacl->initiatorname, 3556 initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
3499 pr_reg->pr_reg_isid); 3557 pr_reg->pr_reg_isid);
3500 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3558 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3559 ret = -EINVAL;
3501 goto out; 3560 goto out;
3502 } 3561 }
3503after_iport_check: 3562after_iport_check:
@@ -3517,7 +3576,8 @@ after_iport_check:
3517 pr_err("Unable to locate %s dest_node_acl for" 3576 pr_err("Unable to locate %s dest_node_acl for"
3518 " TransportID%s\n", dest_tf_ops->get_fabric_name(), 3577 " TransportID%s\n", dest_tf_ops->get_fabric_name(),
3519 initiator_str); 3578 initiator_str);
3520 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3579 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3580 ret = -EINVAL;
3521 goto out; 3581 goto out;
3522 } 3582 }
3523 ret = core_scsi3_nodeacl_depend_item(dest_node_acl); 3583 ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3587,8 @@ after_iport_check:
3527 atomic_dec(&dest_node_acl->acl_pr_ref_count); 3587 atomic_dec(&dest_node_acl->acl_pr_ref_count);
3528 smp_mb__after_atomic_dec(); 3588 smp_mb__after_atomic_dec();
3529 dest_node_acl = NULL; 3589 dest_node_acl = NULL;
3530 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 3590 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3591 ret = -EINVAL;
3531 goto out; 3592 goto out;
3532 } 3593 }
3533#if 0 3594#if 0
@@ -3543,7 +3604,8 @@ after_iport_check:
3543 if (!dest_se_deve) { 3604 if (!dest_se_deve) {
3544 pr_err("Unable to locate %s dest_se_deve from RTPI:" 3605 pr_err("Unable to locate %s dest_se_deve from RTPI:"
3545 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); 3606 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
3546 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3607 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3608 ret = -EINVAL;
3547 goto out; 3609 goto out;
3548 } 3610 }
3549 3611
@@ -3553,7 +3615,8 @@ after_iport_check:
3553 atomic_dec(&dest_se_deve->pr_ref_count); 3615 atomic_dec(&dest_se_deve->pr_ref_count);
3554 smp_mb__after_atomic_dec(); 3616 smp_mb__after_atomic_dec();
3555 dest_se_deve = NULL; 3617 dest_se_deve = NULL;
3556 ret = PYX_TRANSPORT_LU_COMM_FAILURE; 3618 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3619 ret = -EINVAL;
3557 goto out; 3620 goto out;
3558 } 3621 }
3559#if 0 3622#if 0
@@ -3572,7 +3635,8 @@ after_iport_check:
3572 pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation" 3635 pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
3573 " currently held\n"); 3636 " currently held\n");
3574 spin_unlock(&dev->dev_reservation_lock); 3637 spin_unlock(&dev->dev_reservation_lock);
3575 ret = PYX_TRANSPORT_INVALID_CDB_FIELD; 3638 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3639 ret = -EINVAL;
3576 goto out; 3640 goto out;
3577 } 3641 }
3578 /* 3642 /*
@@ -3585,7 +3649,8 @@ after_iport_check:
3585 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" 3649 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
3586 " Nexus is not reservation holder\n"); 3650 " Nexus is not reservation holder\n");
3587 spin_unlock(&dev->dev_reservation_lock); 3651 spin_unlock(&dev->dev_reservation_lock);
3588 ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 3652 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3653 ret = -EINVAL;
3589 goto out; 3654 goto out;
3590 } 3655 }
3591 /* 3656 /*
@@ -3603,7 +3668,8 @@ after_iport_check:
3603 " reservation for type: %s\n", 3668 " reservation for type: %s\n",
3604 core_scsi3_pr_dump_type(pr_res_holder->pr_res_type)); 3669 core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
3605 spin_unlock(&dev->dev_reservation_lock); 3670 spin_unlock(&dev->dev_reservation_lock);
3606 ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 3671 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3672 ret = -EINVAL;
3607 goto out; 3673 goto out;
3608 } 3674 }
3609 pr_res_nacl = pr_res_holder->pr_reg_nacl; 3675 pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3706,8 @@ after_iport_check:
3640 sa_res_key, 0, aptpl, 2, 1); 3706 sa_res_key, 0, aptpl, 2, 1);
3641 if (ret != 0) { 3707 if (ret != 0) {
3642 spin_unlock(&dev->dev_reservation_lock); 3708 spin_unlock(&dev->dev_reservation_lock);
3643 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3709 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3710 ret = -EINVAL;
3644 goto out; 3711 goto out;
3645 } 3712 }
3646 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, 3713 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3771 pr_err("Received PERSISTENT_RESERVE CDB while legacy" 3838 pr_err("Received PERSISTENT_RESERVE CDB while legacy"
3772 " SPC-2 reservation is held, returning" 3839 " SPC-2 reservation is held, returning"
3773 " RESERVATION_CONFLICT\n"); 3840 " RESERVATION_CONFLICT\n");
3774 ret = PYX_TRANSPORT_RESERVATION_CONFLICT; 3841 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
                                                                             3842 ret = -EINVAL;
3775 goto out; 3843 goto out;
3776 } 3844 }
3777 3845
@@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3779 * FIXME: A NULL struct se_session pointer means an this is not coming from 3847 * FIXME: A NULL struct se_session pointer means an this is not coming from
3780 * a $FABRIC_MOD's nexus, but from internal passthrough ops. 3848 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
3781 */ 3849 */
3782 if (!cmd->se_sess) 3850 if (!cmd->se_sess) {
3783 return PYX_TRANSPORT_LU_COMM_FAILURE; 3851 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3852 return -EINVAL;
3853 }
3784 3854
3785 if (cmd->data_length < 24) { 3855 if (cmd->data_length < 24) {
3786 pr_warn("SPC-PR: Received PR OUT parameter list" 3856 pr_warn("SPC-PR: Received PR OUT parameter list"
3787 " length too small: %u\n", cmd->data_length); 3857 " length too small: %u\n", cmd->data_length);
3788 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3858 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3859 ret = -EINVAL;
3789 goto out; 3860 goto out;
3790 } 3861 }
3791 /* 3862 /*
@@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3820 * SPEC_I_PT=1 is only valid for Service action: REGISTER 3891 * SPEC_I_PT=1 is only valid for Service action: REGISTER
3821 */ 3892 */
3822 if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) { 3893 if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
3823 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3894 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3895 ret = -EINVAL;
3824 goto out; 3896 goto out;
3825 } 3897 }
3826 3898
@@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3837 (cmd->data_length != 24)) { 3909 (cmd->data_length != 24)) {
3838 pr_warn("SPC-PR: Received PR OUT illegal parameter" 3910 pr_warn("SPC-PR: Received PR OUT illegal parameter"
3839 " list length: %u\n", cmd->data_length); 3911 " list length: %u\n", cmd->data_length);
3840 ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; 3912 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3913 ret = -EINVAL;
3841 goto out; 3914 goto out;
3842 } 3915 }
3843 /* 3916 /*
@@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
3878 default: 3951 default:
3879 pr_err("Unknown PERSISTENT_RESERVE_OUT service" 3952 pr_err("Unknown PERSISTENT_RESERVE_OUT service"
3880 " action: 0x%02x\n", cdb[1] & 0x1f); 3953 " action: 0x%02x\n", cdb[1] & 0x1f);
3881 ret = PYX_TRANSPORT_INVALID_CDB_FIELD; 3954 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3955 ret = -EINVAL;
3882 break; 3956 break;
3883 } 3957 }
3884 3958
@@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
3906 if (cmd->data_length < 8) { 3980 if (cmd->data_length < 8) {
3907 pr_err("PRIN SA READ_KEYS SCSI Data Length: %u" 3981 pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
3908 " too small\n", cmd->data_length); 3982 " too small\n", cmd->data_length);
3909 return PYX_TRANSPORT_INVALID_CDB_FIELD; 3983 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3984 return -EINVAL;
3910 } 3985 }
3911 3986
3912 buf = transport_kmap_first_data_page(cmd); 3987 buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
3965 if (cmd->data_length < 8) { 4040 if (cmd->data_length < 8) {
3966 pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u" 4041 pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
3967 " too small\n", cmd->data_length); 4042 " too small\n", cmd->data_length);
3968 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4043 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4044 return -EINVAL;
3969 } 4045 }
3970 4046
3971 buf = transport_kmap_first_data_page(cmd); 4047 buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
4047 if (cmd->data_length < 6) { 4123 if (cmd->data_length < 6) {
4048 pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:" 4124 pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
4049 " %u too small\n", cmd->data_length); 4125 " %u too small\n", cmd->data_length);
4050 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4126 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4127 return -EINVAL;
4051 } 4128 }
4052 4129
4053 buf = transport_kmap_first_data_page(cmd); 4130 buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4108 if (cmd->data_length < 8) { 4185 if (cmd->data_length < 8) {
4109 pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" 4186 pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
4110 " too small\n", cmd->data_length); 4187 " too small\n", cmd->data_length);
4111 return PYX_TRANSPORT_INVALID_CDB_FIELD; 4188 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4189 return -EINVAL;
4112 } 4190 }
4113 4191
4114 buf = transport_kmap_first_data_page(cmd); 4192 buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
4255 pr_err("Received PERSISTENT_RESERVE CDB while legacy" 4333 pr_err("Received PERSISTENT_RESERVE CDB while legacy"
4256 " SPC-2 reservation is held, returning" 4334 " SPC-2 reservation is held, returning"
4257 " RESERVATION_CONFLICT\n"); 4335 " RESERVATION_CONFLICT\n");
4258 return PYX_TRANSPORT_RESERVATION_CONFLICT; 4336 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
4337 return -EINVAL;
4259 } 4338 }
4260 4339
4261 switch (cmd->t_task_cdb[1] & 0x1f) { 4340 switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
4274 default: 4353 default:
4275 pr_err("Unknown PERSISTENT_RESERVE_IN service" 4354 pr_err("Unknown PERSISTENT_RESERVE_IN service"
4276 " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f); 4355 " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
4277 ret = PYX_TRANSPORT_INVALID_CDB_FIELD; 4356 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4357 ret = -EINVAL;
4278 break; 4358 break;
4279 } 4359 }
4280 4360
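With the PR OUT/IN emulation now returning negative errnos, the recorded cmd->scsi_sense_reason is what later becomes a CHECK CONDITION for the initiator. A hypothetical caller sketch; emulate_pr_out() stands in for target_scsi3_emulate_pr_out(), the dispatch itself is not part of this diff, and transport_send_check_condition_and_sense() is used on the assumption that it remains the helper that builds sense data from a TCM_* reason:

	static void handle_pr_out(struct se_task *task)
	{
		struct se_cmd *cmd = task->task_se_cmd;
		int ret = emulate_pr_out(task);	/* returns 0 or -errno now */

		if (ret < 0) {
			/* the failing path already set cmd->scsi_sense_reason */
			transport_send_check_condition_and_sense(cmd,
					cmd->scsi_sense_reason, 0);
			return;
		}
		task->task_scsi_status = GOOD;
		transport_complete_task(task, 1);
	}
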
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ed32e1efe429..8b15e56b0384 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
963static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, 963static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
964 struct bio **hbio) 964 struct bio **hbio)
965{ 965{
966 struct se_cmd *cmd = task->task_se_cmd;
966 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; 967 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
967 u32 task_sg_num = task->task_sg_nents; 968 u32 task_sg_num = task->task_sg_nents;
968 struct bio *bio = NULL, *tbio = NULL; 969 struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
971 u32 data_len = task->task_size, i, len, bytes, off; 972 u32 data_len = task->task_size, i, len, bytes, off;
972 int nr_pages = (task->task_size + task_sg[0].offset + 973 int nr_pages = (task->task_size + task_sg[0].offset +
973 PAGE_SIZE - 1) >> PAGE_SHIFT; 974 PAGE_SIZE - 1) >> PAGE_SHIFT;
974 int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 975 int nr_vecs = 0, rc;
975 int rw = (task->task_data_direction == DMA_TO_DEVICE); 976 int rw = (task->task_data_direction == DMA_TO_DEVICE);
976 977
977 *hbio = NULL; 978 *hbio = NULL;
@@ -1058,11 +1059,13 @@ fail:
1058 bio->bi_next = NULL; 1059 bio->bi_next = NULL;
1059 bio_endio(bio, 0); /* XXX: should be error */ 1060 bio_endio(bio, 0); /* XXX: should be error */
1060 } 1061 }
1061 return ret; 1062 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1063 return -ENOMEM;
1062} 1064}
1063 1065
1064static int pscsi_do_task(struct se_task *task) 1066static int pscsi_do_task(struct se_task *task)
1065{ 1067{
1068 struct se_cmd *cmd = task->task_se_cmd;
1066 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; 1069 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
1067 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 1070 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1068 struct request *req; 1071 struct request *req;
@@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
1078 if (!req || IS_ERR(req)) { 1081 if (!req || IS_ERR(req)) {
1079 pr_err("PSCSI: blk_get_request() failed: %ld\n", 1082 pr_err("PSCSI: blk_get_request() failed: %ld\n",
1080 req ? IS_ERR(req) : -ENOMEM); 1083 req ? IS_ERR(req) : -ENOMEM);
1081 return PYX_TRANSPORT_LU_COMM_FAILURE; 1084 cmd->scsi_sense_reason =
1085 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1086 return -ENODEV;
1082 } 1087 }
1083 } else { 1088 } else {
1084 BUG_ON(!task->task_size); 1089 BUG_ON(!task->task_size);
@@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
1087 * Setup the main struct request for the task->task_sg[] payload 1092 * Setup the main struct request for the task->task_sg[] payload
1088 */ 1093 */
1089 ret = pscsi_map_sg(task, task->task_sg, &hbio); 1094 ret = pscsi_map_sg(task, task->task_sg, &hbio);
1090 if (ret < 0) 1095 if (ret < 0) {
1091 return PYX_TRANSPORT_LU_COMM_FAILURE; 1096 cmd->scsi_sense_reason =
1097 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1098 return ret;
1099 }
1092 1100
1093 req = blk_make_request(pdv->pdv_sd->request_queue, hbio, 1101 req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
1094 GFP_KERNEL); 1102 GFP_KERNEL);
@@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
1115 (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), 1123 (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
1116 pscsi_req_done); 1124 pscsi_req_done);
1117 1125
1118 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 1126 return 0;
1119 1127
1120fail: 1128fail:
1121 while (hbio) { 1129 while (hbio) {
@@ -1124,7 +1132,8 @@ fail:
1124 bio->bi_next = NULL; 1132 bio->bi_next = NULL;
1125 bio_endio(bio, 0); /* XXX: should be error */ 1133 bio_endio(bio, 0); /* XXX: should be error */
1126 } 1134 }
1127 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 1135 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1136 return -ENOMEM;
1128} 1137}
1129 1138
1130/* pscsi_get_sense_buffer(): 1139/* pscsi_get_sense_buffer():
@@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
1198 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], 1207 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
1199 pt->pscsi_result); 1208 pt->pscsi_result);
1200 task->task_scsi_status = SAM_STAT_CHECK_CONDITION; 1209 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
1201 task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1210 task->task_se_cmd->scsi_sense_reason =
1202 task->task_se_cmd->transport_error_status = 1211 TCM_UNSUPPORTED_SCSI_OPCODE;
1203 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1204 transport_complete_task(task, 0); 1212 transport_complete_task(task, 0);
1205 break; 1213 break;
1206 } 1214 }
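Both pscsi failure paths above unwind by walking the partially built bio chain and completing each bio; only the return convention changes (sense reason on the command plus -ENOMEM/-ENODEV instead of PYX_TRANSPORT_* codes). A sketch of that cleanup loop as a standalone helper; pscsi_free_bio_chain() is a hypothetical name, and the zero completion status mirrors the XXX noted in the diff rather than being the ideal value:

	static void pscsi_free_bio_chain(struct bio *hbio)
	{
		while (hbio) {
			struct bio *bio = hbio;

			hbio = hbio->bi_next;
			bio->bi_next = NULL;
			bio_endio(bio, 0);	/* XXX: should arguably pass an error */
		}
	}
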
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 5158d3846f19..02e51faa2f4e 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
343 return NULL; 343 return NULL;
344} 344}
345 345
346/* rd_MEMCPY_read(): 346static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
347 *
348 *
349 */
350static int rd_MEMCPY_read(struct rd_request *req)
351{ 347{
352 struct se_task *task = &req->rd_task; 348 struct se_task *task = &req->rd_task;
353 struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; 349 struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
354 struct rd_dev_sg_table *table; 350 struct rd_dev_sg_table *table;
355 struct scatterlist *sg_d, *sg_s; 351 struct scatterlist *rd_sg;
356 void *dst, *src; 352 struct sg_mapping_iter m;
357 u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
358 u32 length, page_end = 0, table_sg_end;
359 u32 rd_offset = req->rd_offset; 353 u32 rd_offset = req->rd_offset;
354 u32 src_len;
360 355
361 table = rd_get_sg_table(dev, req->rd_page); 356 table = rd_get_sg_table(dev, req->rd_page);
362 if (!table) 357 if (!table)
363 return -EINVAL; 358 return -EINVAL;
364 359
365 table_sg_end = (table->page_end_offset - req->rd_page); 360 rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
366 sg_d = task->task_sg;
367 sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
368 361
369 pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" 362 pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
370 " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, 363 dev->rd_dev_id, read_rd ? "Read" : "Write",
371 req->rd_page, req->rd_offset); 364 task->task_lba, req->rd_size, req->rd_page,
372 365 rd_offset);
373 src_offset = rd_offset;
374 366
367 src_len = PAGE_SIZE - rd_offset;
368 sg_miter_start(&m, task->task_sg, task->task_sg_nents,
369 read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
375 while (req->rd_size) { 370 while (req->rd_size) {
376 if ((sg_d[i].length - dst_offset) < 371 u32 len;
377 (sg_s[j].length - src_offset)) { 372 void *rd_addr;
378 length = (sg_d[i].length - dst_offset);
379
380 pr_debug("Step 1 - sg_d[%d]: %p length: %d"
381 " offset: %u sg_s[%d].length: %u\n", i,
382 &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
383 sg_s[j].length);
384 pr_debug("Step 1 - length: %u dst_offset: %u"
385 " src_offset: %u\n", length, dst_offset,
386 src_offset);
387
388 if (length > req->rd_size)
389 length = req->rd_size;
390
391 dst = sg_virt(&sg_d[i++]) + dst_offset;
392 BUG_ON(!dst);
393
394 src = sg_virt(&sg_s[j]) + src_offset;
395 BUG_ON(!src);
396
397 dst_offset = 0;
398 src_offset = length;
399 page_end = 0;
400 } else {
401 length = (sg_s[j].length - src_offset);
402
403 pr_debug("Step 2 - sg_d[%d]: %p length: %d"
404 " offset: %u sg_s[%d].length: %u\n", i,
405 &sg_d[i], sg_d[i].length, sg_d[i].offset,
406 j, sg_s[j].length);
407 pr_debug("Step 2 - length: %u dst_offset: %u"
408 " src_offset: %u\n", length, dst_offset,
409 src_offset);
410
411 if (length > req->rd_size)
412 length = req->rd_size;
413
414 dst = sg_virt(&sg_d[i]) + dst_offset;
415 BUG_ON(!dst);
416
417 if (sg_d[i].length == length) {
418 i++;
419 dst_offset = 0;
420 } else
421 dst_offset = length;
422
423 src = sg_virt(&sg_s[j++]) + src_offset;
424 BUG_ON(!src);
425
426 src_offset = 0;
427 page_end = 1;
428 }
429 373
430 memcpy(dst, src, length); 374 sg_miter_next(&m);
375 len = min((u32)m.length, src_len);
376 m.consumed = len;
431 377
432 pr_debug("page: %u, remaining size: %u, length: %u," 378 rd_addr = sg_virt(rd_sg) + rd_offset;
433 " i: %u, j: %u\n", req->rd_page,
434 (req->rd_size - length), length, i, j);
435 379
436 req->rd_size -= length; 380 if (read_rd)
437 if (!req->rd_size) 381 memcpy(m.addr, rd_addr, len);
438 return 0; 382 else
383 memcpy(rd_addr, m.addr, len);
439 384
440 if (!page_end) 385 req->rd_size -= len;
386 if (!req->rd_size)
441 continue; 387 continue;
442 388
443 if (++req->rd_page <= table->page_end_offset) { 389 src_len -= len;
444 pr_debug("page: %u in same page table\n", 390 if (src_len) {
445 req->rd_page); 391 rd_offset += len;
446 continue; 392 continue;
447 } 393 }
448 394
449 pr_debug("getting new page table for page: %u\n", 395 /* rd page completed, next one please */
450 req->rd_page); 396 req->rd_page++;
451 397 rd_offset = 0;
452 table = rd_get_sg_table(dev, req->rd_page); 398 src_len = PAGE_SIZE;
453 if (!table) 399 if (req->rd_page <= table->page_end_offset) {
454 return -EINVAL; 400 rd_sg++;
455
456 sg_s = &table->sg_table[j = 0];
457 }
458
459 return 0;
460}
461
462/* rd_MEMCPY_write():
463 *
464 *
465 */
466static int rd_MEMCPY_write(struct rd_request *req)
467{
468 struct se_task *task = &req->rd_task;
469 struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
470 struct rd_dev_sg_table *table;
471 struct scatterlist *sg_d, *sg_s;
472 void *dst, *src;
473 u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
474 u32 length, page_end = 0, table_sg_end;
475 u32 rd_offset = req->rd_offset;
476
477 table = rd_get_sg_table(dev, req->rd_page);
478 if (!table)
479 return -EINVAL;
480
481 table_sg_end = (table->page_end_offset - req->rd_page);
482 sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
483 sg_s = task->task_sg;
484
485 pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
486 " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
487 req->rd_page, req->rd_offset);
488
489 dst_offset = rd_offset;
490
491 while (req->rd_size) {
492 if ((sg_s[i].length - src_offset) <
493 (sg_d[j].length - dst_offset)) {
494 length = (sg_s[i].length - src_offset);
495
496 pr_debug("Step 1 - sg_s[%d]: %p length: %d"
497 " offset: %d sg_d[%d].length: %u\n", i,
498 &sg_s[i], sg_s[i].length, sg_s[i].offset,
499 j, sg_d[j].length);
500 pr_debug("Step 1 - length: %u src_offset: %u"
501 " dst_offset: %u\n", length, src_offset,
502 dst_offset);
503
504 if (length > req->rd_size)
505 length = req->rd_size;
506
507 src = sg_virt(&sg_s[i++]) + src_offset;
508 BUG_ON(!src);
509
510 dst = sg_virt(&sg_d[j]) + dst_offset;
511 BUG_ON(!dst);
512
513 src_offset = 0;
514 dst_offset = length;
515 page_end = 0;
516 } else {
517 length = (sg_d[j].length - dst_offset);
518
519 pr_debug("Step 2 - sg_s[%d]: %p length: %d"
520 " offset: %d sg_d[%d].length: %u\n", i,
521 &sg_s[i], sg_s[i].length, sg_s[i].offset,
522 j, sg_d[j].length);
523 pr_debug("Step 2 - length: %u src_offset: %u"
524 " dst_offset: %u\n", length, src_offset,
525 dst_offset);
526
527 if (length > req->rd_size)
528 length = req->rd_size;
529
530 src = sg_virt(&sg_s[i]) + src_offset;
531 BUG_ON(!src);
532
533 if (sg_s[i].length == length) {
534 i++;
535 src_offset = 0;
536 } else
537 src_offset = length;
538
539 dst = sg_virt(&sg_d[j++]) + dst_offset;
540 BUG_ON(!dst);
541
542 dst_offset = 0;
543 page_end = 1;
544 }
545
546 memcpy(dst, src, length);
547
548 pr_debug("page: %u, remaining size: %u, length: %u,"
549 " i: %u, j: %u\n", req->rd_page,
550 (req->rd_size - length), length, i, j);
551
552 req->rd_size -= length;
553 if (!req->rd_size)
554 return 0;
555
556 if (!page_end)
557 continue;
558
559 if (++req->rd_page <= table->page_end_offset) {
560 pr_debug("page: %u in same page table\n",
561 req->rd_page);
562 continue; 401 continue;
563 } 402 }
564 403
565 pr_debug("getting new page table for page: %u\n",
566 req->rd_page);
567
568 table = rd_get_sg_table(dev, req->rd_page); 404 table = rd_get_sg_table(dev, req->rd_page);
569 if (!table) 405 if (!table) {
406 sg_miter_stop(&m);
570 return -EINVAL; 407 return -EINVAL;
408 }
571 409
572 sg_d = &table->sg_table[j = 0]; 410 /* since we increment, the first sg entry is correct */
411 rd_sg = table->sg_table;
573 } 412 }
574 413 sg_miter_stop(&m);
575 return 0; 414 return 0;
576} 415}
577 416
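The rewritten rd_MEMCPY() above replaces the hand-rolled dual-index scatterlist walk with the kernel's SG mapping iterator: sg_miter_start() declares the direction, sg_miter_next() maps one chunk at a time (m.addr/m.length), m.consumed reports how much was actually used, and sg_miter_stop() releases the mapping. A minimal usage sketch, assuming a simple copy out of an SG list into a flat buffer; copy_sg_to_buf() is a made-up helper, not a kernel API:

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>
	#include <linux/string.h>

	static void copy_sg_to_buf(struct scatterlist *sgl, unsigned int nents,
				   void *buf, size_t len)
	{
		struct sg_mapping_iter m;
		char *dst = buf;

		sg_miter_start(&m, sgl, nents, SG_MITER_FROM_SG);
		while (len && sg_miter_next(&m)) {
			size_t chunk = min_t(size_t, m.length, len);

			memcpy(dst, m.addr, chunk);	/* m.addr is already mapped */
			m.consumed = chunk;		/* only consumed what we copied */
			dst += chunk;
			len -= chunk;
		}
		sg_miter_stop(&m);			/* must pair with sg_miter_start() */
	}
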
@@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
583{ 422{
584 struct se_device *dev = task->task_se_cmd->se_dev; 423 struct se_device *dev = task->task_se_cmd->se_dev;
585 struct rd_request *req = RD_REQ(task); 424 struct rd_request *req = RD_REQ(task);
586 unsigned long long lba; 425 u64 tmp;
587 int ret; 426 int ret;
588 427
589 req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE; 428 tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
590 lba = task->task_lba; 429 req->rd_offset = do_div(tmp, PAGE_SIZE);
591 req->rd_offset = (do_div(lba, 430 req->rd_page = tmp;
592 (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
593 dev->se_sub_dev->se_dev_attrib.block_size;
594 req->rd_size = task->task_size; 431 req->rd_size = task->task_size;
595 432
596 if (task->task_data_direction == DMA_FROM_DEVICE) 433 ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
597 ret = rd_MEMCPY_read(req);
598 else
599 ret = rd_MEMCPY_write(req);
600
601 if (ret != 0) 434 if (ret != 0)
602 return ret; 435 return ret;
603 436
604 task->task_scsi_status = GOOD; 437 task->task_scsi_status = GOOD;
605 transport_complete_task(task, 1); 438 transport_complete_task(task, 1);
606 439 return 0;
607 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
608} 440}
609 441
610/* rd_free_task(): (Part of se_subsystem_api_t template) 442/* rd_free_task(): (Part of se_subsystem_api_t template)
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 217e29df6297..684522805a1f 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
345 " %d t_fe_count: %d\n", (preempt_and_abort_list) ? 345 " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
346 "Preempt" : "", cmd, cmd->t_state, 346 "Preempt" : "", cmd, cmd->t_state,
347 atomic_read(&cmd->t_fe_count)); 347 atomic_read(&cmd->t_fe_count));
348 /*
349 * Signal that the command has failed via cmd->se_cmd_flags,
350 */
351 transport_new_cmd_failure(cmd);
352 348
353 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, 349 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
354 atomic_read(&cmd->t_fe_count)); 350 atomic_read(&cmd->t_fe_count));
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3400ae6e93f8..0257658e2e3e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -61,7 +61,6 @@
61static int sub_api_initialized; 61static int sub_api_initialized;
62 62
63static struct workqueue_struct *target_completion_wq; 63static struct workqueue_struct *target_completion_wq;
64static struct kmem_cache *se_cmd_cache;
65static struct kmem_cache *se_sess_cache; 64static struct kmem_cache *se_sess_cache;
66struct kmem_cache *se_tmr_req_cache; 65struct kmem_cache *se_tmr_req_cache;
67struct kmem_cache *se_ua_cache; 66struct kmem_cache *se_ua_cache;
@@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
82static void transport_put_cmd(struct se_cmd *cmd); 81static void transport_put_cmd(struct se_cmd *cmd);
83static void transport_remove_cmd_from_queue(struct se_cmd *cmd); 82static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
84static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); 83static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
85static void transport_generic_request_failure(struct se_cmd *, int, int); 84static void transport_generic_request_failure(struct se_cmd *);
86static void target_complete_ok_work(struct work_struct *work); 85static void target_complete_ok_work(struct work_struct *work);
87 86
88int init_se_kmem_caches(void) 87int init_se_kmem_caches(void)
89{ 88{
90 se_cmd_cache = kmem_cache_create("se_cmd_cache",
91 sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
92 if (!se_cmd_cache) {
93 pr_err("kmem_cache_create for struct se_cmd failed\n");
94 goto out;
95 }
96 se_tmr_req_cache = kmem_cache_create("se_tmr_cache", 89 se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
97 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), 90 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
98 0, NULL); 91 0, NULL);
99 if (!se_tmr_req_cache) { 92 if (!se_tmr_req_cache) {
100 pr_err("kmem_cache_create() for struct se_tmr_req" 93 pr_err("kmem_cache_create() for struct se_tmr_req"
101 " failed\n"); 94 " failed\n");
102 goto out_free_cmd_cache; 95 goto out;
103 } 96 }
104 se_sess_cache = kmem_cache_create("se_sess_cache", 97 se_sess_cache = kmem_cache_create("se_sess_cache",
105 sizeof(struct se_session), __alignof__(struct se_session), 98 sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +175,6 @@ out_free_sess_cache:
182 kmem_cache_destroy(se_sess_cache); 175 kmem_cache_destroy(se_sess_cache);
183out_free_tmr_req_cache: 176out_free_tmr_req_cache:
184 kmem_cache_destroy(se_tmr_req_cache); 177 kmem_cache_destroy(se_tmr_req_cache);
185out_free_cmd_cache:
186 kmem_cache_destroy(se_cmd_cache);
187out: 178out:
188 return -ENOMEM; 179 return -ENOMEM;
189} 180}
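Removing se_cmd_cache also shortens the goto-unwind chain in init_se_kmem_caches(): each failure label now frees only the caches created before it, and the first allocation failure can jump straight to "out". A generic sketch of that unwind idiom; the cache names and sizes are illustrative, not the kernel's:

	#include <linux/slab.h>

	static struct kmem_cache *cache_a;
	static struct kmem_cache *cache_b;

	static int init_caches(void)
	{
		cache_a = kmem_cache_create("cache_a", 64, 0, 0, NULL);
		if (!cache_a)
			goto out;			/* nothing to unwind yet */
		cache_b = kmem_cache_create("cache_b", 128, 0, 0, NULL);
		if (!cache_b)
			goto out_free_a;		/* unwind only cache_a */
		return 0;

	out_free_a:
		kmem_cache_destroy(cache_a);
	out:
		return -ENOMEM;
	}
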
@@ -191,7 +182,6 @@ out:
191void release_se_kmem_caches(void) 182void release_se_kmem_caches(void)
192{ 183{
193 destroy_workqueue(target_completion_wq); 184 destroy_workqueue(target_completion_wq);
194 kmem_cache_destroy(se_cmd_cache);
195 kmem_cache_destroy(se_tmr_req_cache); 185 kmem_cache_destroy(se_tmr_req_cache);
196 kmem_cache_destroy(se_sess_cache); 186 kmem_cache_destroy(se_sess_cache);
197 kmem_cache_destroy(se_ua_cache); 187 kmem_cache_destroy(se_ua_cache);
@@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
680 task->task_scsi_status = GOOD; 670 task->task_scsi_status = GOOD;
681 } else { 671 } else {
682 task->task_scsi_status = SAM_STAT_CHECK_CONDITION; 672 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
683 task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; 673 task->task_se_cmd->scsi_sense_reason =
684 task->task_se_cmd->transport_error_status = 674 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
685 PYX_TRANSPORT_ILLEGAL_REQUEST; 675
686 } 676 }
687 677
688 transport_complete_task(task, good); 678 transport_complete_task(task, good);
@@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
693{ 683{
694 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 684 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
695 685
696 transport_generic_request_failure(cmd, 1, 1); 686 transport_generic_request_failure(cmd);
697} 687}
698 688
699/* transport_complete_task(): 689/* transport_complete_task():
@@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
755 if (cmd->t_tasks_failed) { 745 if (cmd->t_tasks_failed) {
756 if (!task->task_error_status) { 746 if (!task->task_error_status) {
757 task->task_error_status = 747 task->task_error_status =
758 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 748 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
759 cmd->transport_error_status = 749 cmd->scsi_sense_reason =
760 PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 750 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
761 } 751 }
752
762 INIT_WORK(&cmd->work, target_complete_failure_work); 753 INIT_WORK(&cmd->work, target_complete_failure_work);
763 } else { 754 } else {
764 atomic_set(&cmd->t_transport_complete, 1); 755 atomic_set(&cmd->t_transport_complete, 1);
@@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
1335 dev->se_hba = hba; 1326 dev->se_hba = hba;
1336 dev->se_sub_dev = se_dev; 1327 dev->se_sub_dev = se_dev;
1337 dev->transport = transport; 1328 dev->transport = transport;
1338 atomic_set(&dev->active_cmds, 0);
1339 INIT_LIST_HEAD(&dev->dev_list); 1329 INIT_LIST_HEAD(&dev->dev_list);
1340 INIT_LIST_HEAD(&dev->dev_sep_list); 1330 INIT_LIST_HEAD(&dev->dev_sep_list);
1341 INIT_LIST_HEAD(&dev->dev_tmr_list); 1331 INIT_LIST_HEAD(&dev->dev_tmr_list);
1342 INIT_LIST_HEAD(&dev->execute_task_list); 1332 INIT_LIST_HEAD(&dev->execute_task_list);
1343 INIT_LIST_HEAD(&dev->delayed_cmd_list); 1333 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1344 INIT_LIST_HEAD(&dev->ordered_cmd_list);
1345 INIT_LIST_HEAD(&dev->state_task_list); 1334 INIT_LIST_HEAD(&dev->state_task_list);
1346 INIT_LIST_HEAD(&dev->qf_cmd_list); 1335 INIT_LIST_HEAD(&dev->qf_cmd_list);
1347 spin_lock_init(&dev->execute_task_lock); 1336 spin_lock_init(&dev->execute_task_lock);
1348 spin_lock_init(&dev->delayed_cmd_lock); 1337 spin_lock_init(&dev->delayed_cmd_lock);
1349 spin_lock_init(&dev->ordered_cmd_lock);
1350 spin_lock_init(&dev->state_task_lock);
1351 spin_lock_init(&dev->dev_alua_lock);
1352 spin_lock_init(&dev->dev_reservation_lock); 1338 spin_lock_init(&dev->dev_reservation_lock);
1353 spin_lock_init(&dev->dev_status_lock); 1339 spin_lock_init(&dev->dev_status_lock);
1354 spin_lock_init(&dev->dev_status_thr_lock);
1355 spin_lock_init(&dev->se_port_lock); 1340 spin_lock_init(&dev->se_port_lock);
1356 spin_lock_init(&dev->se_tmr_lock); 1341 spin_lock_init(&dev->se_tmr_lock);
1357 spin_lock_init(&dev->qf_cmd_lock); 1342 spin_lock_init(&dev->qf_cmd_lock);
@@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
1507{ 1492{
1508 INIT_LIST_HEAD(&cmd->se_lun_node); 1493 INIT_LIST_HEAD(&cmd->se_lun_node);
1509 INIT_LIST_HEAD(&cmd->se_delayed_node); 1494 INIT_LIST_HEAD(&cmd->se_delayed_node);
1510 INIT_LIST_HEAD(&cmd->se_ordered_node);
1511 INIT_LIST_HEAD(&cmd->se_qf_node); 1495 INIT_LIST_HEAD(&cmd->se_qf_node);
1512 INIT_LIST_HEAD(&cmd->se_queue_node); 1496 INIT_LIST_HEAD(&cmd->se_queue_node);
1513 INIT_LIST_HEAD(&cmd->se_cmd_list); 1497 INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
1573 pr_err("Received SCSI CDB with command_size: %d that" 1557 pr_err("Received SCSI CDB with command_size: %d that"
1574 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1558 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1575 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1559 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1560 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1561 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1576 return -EINVAL; 1562 return -EINVAL;
1577 } 1563 }
1578 /* 1564 /*
@@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
1588 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1574 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1589 scsi_command_size(cdb), 1575 scsi_command_size(cdb),
1590 (unsigned long)sizeof(cmd->__t_task_cdb)); 1576 (unsigned long)sizeof(cmd->__t_task_cdb));
1577 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1578 cmd->scsi_sense_reason =
1579 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1591 return -ENOMEM; 1580 return -ENOMEM;
1592 } 1581 }
1593 } else 1582 } else
@@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
1658 * and call transport_generic_request_failure() if necessary. 1647 * and call transport_generic_request_failure() if necessary.
1659 */ 1648 */
1660 ret = transport_generic_new_cmd(cmd); 1649 ret = transport_generic_new_cmd(cmd);
1661 if (ret < 0) { 1650 if (ret < 0)
1662 cmd->transport_error_status = ret; 1651 transport_generic_request_failure(cmd);
1663 transport_generic_request_failure(cmd, 0, 1652
1664 (cmd->data_direction != DMA_TO_DEVICE));
1665 }
1666 return 0; 1653 return 0;
1667} 1654}
1668EXPORT_SYMBOL(transport_handle_cdb_direct); 1655EXPORT_SYMBOL(transport_handle_cdb_direct);
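The pattern in this hunk recurs throughout the patch: transport_generic_request_failure() loses its complete/sc arguments, and instead of stashing a PYX_TRANSPORT_* code in cmd->transport_error_status each error path records a TCM_* value in cmd->scsi_sense_reason before calling the handler. A minimal sketch of that convention, using only names visible in this diff (the wrapper itself is made up for illustration):

    /* Illustrative helper, not part of the patch: the failing path marks
     * the command as a CDB exception, records a TCM_* sense reason, and
     * lets the one-argument failure handler build the CHECK CONDITION
     * response for the fabric. */
    static void fail_cmd(struct se_cmd *cmd, int sense_reason)
    {
            cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
            cmd->scsi_sense_reason = sense_reason;
            transport_generic_request_failure(cmd);
    }

    /* e.g. for a device that has gone offline:
     *   fail_cmd(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
     */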
@@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1798/* 1785/*
1799 * Handle SAM-esque emulation for generic transport request failures. 1786 * Handle SAM-esque emulation for generic transport request failures.
1800 */ 1787 */
1801static void transport_generic_request_failure( 1788static void transport_generic_request_failure(struct se_cmd *cmd)
1802 struct se_cmd *cmd,
1803 int complete,
1804 int sc)
1805{ 1789{
1806 int ret = 0; 1790 int ret = 0;
1807 1791
1808 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 1792 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1809 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 1793 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1810 cmd->t_task_cdb[0]); 1794 cmd->t_task_cdb[0]);
1811 pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n", 1795 pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
1812 cmd->se_tfo->get_cmd_state(cmd), 1796 cmd->se_tfo->get_cmd_state(cmd),
1813 cmd->t_state, 1797 cmd->t_state, cmd->scsi_sense_reason);
1814 cmd->transport_error_status);
1815 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" 1798 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
1816 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" 1799 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1817 " t_transport_active: %d t_transport_stop: %d" 1800 " t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
1829 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 1812 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1830 transport_complete_task_attr(cmd); 1813 transport_complete_task_attr(cmd);
1831 1814
1832 if (complete) { 1815 switch (cmd->scsi_sense_reason) {
1833 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; 1816 case TCM_NON_EXISTENT_LUN:
1834 } 1817 case TCM_UNSUPPORTED_SCSI_OPCODE:
1835 1818 case TCM_INVALID_CDB_FIELD:
1836 switch (cmd->transport_error_status) { 1819 case TCM_INVALID_PARAMETER_LIST:
1837 case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: 1820 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1838 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1821 case TCM_UNKNOWN_MODE_PAGE:
1839 break; 1822 case TCM_WRITE_PROTECTED:
1840 case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: 1823 case TCM_CHECK_CONDITION_ABORT_CMD:
1841 cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; 1824 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1842 break; 1825 case TCM_CHECK_CONDITION_NOT_READY:
1843 case PYX_TRANSPORT_INVALID_CDB_FIELD:
1844 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1845 break;
1846 case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
1847 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1848 break;
1849 case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
1850 if (!sc)
1851 transport_new_cmd_failure(cmd);
1852 /*
1853 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
1854 * we force this session to fall back to session
1855 * recovery.
1856 */
1857 cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
1858 cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
1859
1860 goto check_stop;
1861 case PYX_TRANSPORT_LU_COMM_FAILURE:
1862 case PYX_TRANSPORT_ILLEGAL_REQUEST:
1863 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1864 break;
1865 case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
1866 cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
1867 break;
1868 case PYX_TRANSPORT_WRITE_PROTECTED:
1869 cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
1870 break; 1826 break;
1871 case PYX_TRANSPORT_RESERVATION_CONFLICT: 1827 case TCM_RESERVATION_CONFLICT:
1872 /* 1828 /*
1873 * No SENSE Data payload for this case, set SCSI Status 1829 * No SENSE Data payload for this case, set SCSI Status
1874 * and queue the response to $FABRIC_MOD. 1830 * and queue the response to $FABRIC_MOD.
@@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
1893 if (ret == -EAGAIN || ret == -ENOMEM) 1849 if (ret == -EAGAIN || ret == -ENOMEM)
1894 goto queue_full; 1850 goto queue_full;
1895 goto check_stop; 1851 goto check_stop;
1896 case PYX_TRANSPORT_USE_SENSE_REASON:
1897 /*
1898 * struct se_cmd->scsi_sense_reason already set
1899 */
1900 break;
1901 default: 1852 default:
1902 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1853 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1903 cmd->t_task_cdb[0], 1854 cmd->t_task_cdb[0], cmd->scsi_sense_reason);
1904 cmd->transport_error_status);
1905 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1855 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1906 break; 1856 break;
1907 } 1857 }
@@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
1912 * transport_send_check_condition_and_sense() after handling 1862 * transport_send_check_condition_and_sense() after handling
1913 * possible unsolicited write data payloads. 1863 * possible unsolicited write data payloads.
1914 */ 1864 */
1915 if (!sc && !cmd->se_tfo->new_cmd_map) 1865 ret = transport_send_check_condition_and_sense(cmd,
1916 transport_new_cmd_failure(cmd); 1866 cmd->scsi_sense_reason, 0);
1917 else { 1867 if (ret == -EAGAIN || ret == -ENOMEM)
1918 ret = transport_send_check_condition_and_sense(cmd, 1868 goto queue_full;
1919 cmd->scsi_sense_reason, 0);
1920 if (ret == -EAGAIN || ret == -ENOMEM)
1921 goto queue_full;
1922 }
1923 1869
1924check_stop: 1870check_stop:
1925 transport_lun_remove_cmd(cmd); 1871 transport_lun_remove_cmd(cmd);
@@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
2002 * to allow the passed struct se_cmd list of tasks to the front of the list. 1948 * to allow the passed struct se_cmd list of tasks to the front of the list.
2003 */ 1949 */
2004 if (cmd->sam_task_attr == MSG_HEAD_TAG) { 1950 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
2005 atomic_inc(&cmd->se_dev->dev_hoq_count);
2006 smp_mb__after_atomic_inc();
2007 pr_debug("Added HEAD_OF_QUEUE for CDB:" 1951 pr_debug("Added HEAD_OF_QUEUE for CDB:"
2008 " 0x%02x, se_ordered_id: %u\n", 1952 " 0x%02x, se_ordered_id: %u\n",
2009 cmd->t_task_cdb[0], 1953 cmd->t_task_cdb[0],
2010 cmd->se_ordered_id); 1954 cmd->se_ordered_id);
2011 return 1; 1955 return 1;
2012 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 1956 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
2013 spin_lock(&cmd->se_dev->ordered_cmd_lock);
2014 list_add_tail(&cmd->se_ordered_node,
2015 &cmd->se_dev->ordered_cmd_list);
2016 spin_unlock(&cmd->se_dev->ordered_cmd_lock);
2017
2018 atomic_inc(&cmd->se_dev->dev_ordered_sync); 1957 atomic_inc(&cmd->se_dev->dev_ordered_sync);
2019 smp_mb__after_atomic_inc(); 1958 smp_mb__after_atomic_inc();
2020 1959
@@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
2076{ 2015{
2077 int add_tasks; 2016 int add_tasks;
2078 2017
2079 if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { 2018 if (se_dev_check_online(cmd->se_dev) != 0) {
2080 cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; 2019 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2081 transport_generic_request_failure(cmd, 0, 1); 2020 transport_generic_request_failure(cmd);
2082 return 0; 2021 return 0;
2083 } 2022 }
2084 2023
@@ -2163,14 +2102,13 @@ check_depth:
2163 else 2102 else
2164 error = dev->transport->do_task(task); 2103 error = dev->transport->do_task(task);
2165 if (error != 0) { 2104 if (error != 0) {
2166 cmd->transport_error_status = error;
2167 spin_lock_irqsave(&cmd->t_state_lock, flags); 2105 spin_lock_irqsave(&cmd->t_state_lock, flags);
2168 task->task_flags &= ~TF_ACTIVE; 2106 task->task_flags &= ~TF_ACTIVE;
2169 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2107 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2170 atomic_set(&cmd->t_transport_sent, 0); 2108 atomic_set(&cmd->t_transport_sent, 0);
2171 transport_stop_tasks_for_cmd(cmd); 2109 transport_stop_tasks_for_cmd(cmd);
2172 atomic_inc(&dev->depth_left); 2110 atomic_inc(&dev->depth_left);
2173 transport_generic_request_failure(cmd, 0, 1); 2111 transport_generic_request_failure(cmd);
2174 } 2112 }
2175 2113
2176 goto check_depth; 2114 goto check_depth;
@@ -2178,19 +2116,6 @@ check_depth:
2178 return 0; 2116 return 0;
2179} 2117}
2180 2118
2181void transport_new_cmd_failure(struct se_cmd *se_cmd)
2182{
2183 unsigned long flags;
2184 /*
2185 * Any unsolicited data will get dumped for failed command inside of
2186 * the fabric plugin
2187 */
2188 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2189 se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
2190 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2191 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2192}
2193
2194static inline u32 transport_get_sectors_6( 2119static inline u32 transport_get_sectors_6(
2195 unsigned char *cdb, 2120 unsigned char *cdb,
2196 struct se_cmd *cmd, 2121 struct se_cmd *cmd,
@@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
2213 2138
2214 /* 2139 /*
2215 * Everything else assume TYPE_DISK Sector CDB location. 2140 * Everything else assume TYPE_DISK Sector CDB location.
2216 * Use 8-bit sector value. 2141 * Use 8-bit sector value. SBC-3 says:
2142 *
2143 * A TRANSFER LENGTH field set to zero specifies that 256
2144 * logical blocks shall be written. Any other value
2145 * specifies the number of logical blocks that shall be
2146 * written.
2217 */ 2147 */
2218type_disk: 2148type_disk:
2219 return (u32)cdb[4]; 2149 return cdb[4] ? : 256;
2220} 2150}
2221 2151
2222static inline u32 transport_get_sectors_10( 2152static inline u32 transport_get_sectors_10(
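The new return statement above folds in the SBC-3 rule for 6-byte CDBs, where a TRANSFER LENGTH of zero means 256 logical blocks. A standalone sketch of the same rule (the WRITE(6) opcode byte is only an example value):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: per SBC-3, a TRANSFER LENGTH of zero in a 6-byte
     * CDB means 256 logical blocks, which is what the "cdb[4] ? : 256"
     * change above implements for TYPE_DISK. */
    static uint32_t sectors_from_cdb6(const uint8_t *cdb)
    {
            return cdb[4] ? cdb[4] : 256;
    }

    int main(void)
    {
            uint8_t write6[6] = { 0x0a, 0, 0, 0, 0, 0 };    /* TRANSFER LENGTH = 0 */

            printf("%u blocks\n", (unsigned)sectors_from_cdb6(write6));    /* 256 */
            return 0;
    }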
@@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
2460 return -1; 2390 return -1;
2461} 2391}
2462 2392
2463static int
2464transport_handle_reservation_conflict(struct se_cmd *cmd)
2465{
2466 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2467 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2468 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2469 /*
2470 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2471 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2472 * CONFLICT STATUS.
2473 *
2474 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2475 */
2476 if (cmd->se_sess &&
2477 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
2478 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
2479 cmd->orig_fe_lun, 0x2C,
2480 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2481 return -EINVAL;
2482}
2483
2484static inline long long transport_dev_end_lba(struct se_device *dev) 2393static inline long long transport_dev_end_lba(struct se_device *dev)
2485{ 2394{
2486 return dev->transport->get_blocks(dev) + 1; 2395 return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
2595 */ 2504 */
2596 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { 2505 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2597 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( 2506 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
2598 cmd, cdb, pr_reg_type) != 0) 2507 cmd, cdb, pr_reg_type) != 0) {
2599 return transport_handle_reservation_conflict(cmd); 2508 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2509 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2510 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2511 return -EBUSY;
2512 }
2600 /* 2513 /*
2601 * This means the CDB is allowed for the SCSI Initiator port 2514 * This means the CDB is allowed for the SCSI Initiator port
2602 * when said port is *NOT* holding the legacy SPC-2 or 2515 * when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
2658 goto out_unsupported_cdb; 2571 goto out_unsupported_cdb;
2659 size = transport_get_size(sectors, cdb, cmd); 2572 size = transport_get_size(sectors, cdb, cmd);
2660 cmd->t_task_lba = transport_lba_32(cdb); 2573 cmd->t_task_lba = transport_lba_32(cdb);
2661 cmd->t_tasks_fua = (cdb[1] & 0x8); 2574 if (cdb[1] & 0x8)
2575 cmd->se_cmd_flags |= SCF_FUA;
2662 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2576 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2663 break; 2577 break;
2664 case WRITE_12: 2578 case WRITE_12:
@@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
2667 goto out_unsupported_cdb; 2581 goto out_unsupported_cdb;
2668 size = transport_get_size(sectors, cdb, cmd); 2582 size = transport_get_size(sectors, cdb, cmd);
2669 cmd->t_task_lba = transport_lba_32(cdb); 2583 cmd->t_task_lba = transport_lba_32(cdb);
2670 cmd->t_tasks_fua = (cdb[1] & 0x8); 2584 if (cdb[1] & 0x8)
2585 cmd->se_cmd_flags |= SCF_FUA;
2671 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2586 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2672 break; 2587 break;
2673 case WRITE_16: 2588 case WRITE_16:
@@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
2676 goto out_unsupported_cdb; 2591 goto out_unsupported_cdb;
2677 size = transport_get_size(sectors, cdb, cmd); 2592 size = transport_get_size(sectors, cdb, cmd);
2678 cmd->t_task_lba = transport_lba_64(cdb); 2593 cmd->t_task_lba = transport_lba_64(cdb);
2679 cmd->t_tasks_fua = (cdb[1] & 0x8); 2594 if (cdb[1] & 0x8)
2595 cmd->se_cmd_flags |= SCF_FUA;
2680 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2596 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2681 break; 2597 break;
2682 case XDWRITEREAD_10: 2598 case XDWRITEREAD_10:
2683 if ((cmd->data_direction != DMA_TO_DEVICE) || 2599 if ((cmd->data_direction != DMA_TO_DEVICE) ||
2684 !(cmd->t_tasks_bidi)) 2600 !(cmd->se_cmd_flags & SCF_BIDI))
2685 goto out_invalid_cdb_field; 2601 goto out_invalid_cdb_field;
2686 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2602 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2687 if (sector_ret) 2603 if (sector_ret)
@@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
2700 * Setup BIDI XOR callback to be run after I/O completion. 2616 * Setup BIDI XOR callback to be run after I/O completion.
2701 */ 2617 */
2702 cmd->transport_complete_callback = &transport_xor_callback; 2618 cmd->transport_complete_callback = &transport_xor_callback;
2703 cmd->t_tasks_fua = (cdb[1] & 0x8); 2619 if (cdb[1] & 0x8)
2620 cmd->se_cmd_flags |= SCF_FUA;
2704 break; 2621 break;
2705 case VARIABLE_LENGTH_CMD: 2622 case VARIABLE_LENGTH_CMD:
2706 service_action = get_unaligned_be16(&cdb[8]); 2623 service_action = get_unaligned_be16(&cdb[8]);
@@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
2728 * completion. 2645 * completion.
2729 */ 2646 */
2730 cmd->transport_complete_callback = &transport_xor_callback; 2647 cmd->transport_complete_callback = &transport_xor_callback;
2731 cmd->t_tasks_fua = (cdb[10] & 0x8); 2648 if (cdb[1] & 0x8)
2649 cmd->se_cmd_flags |= SCF_FUA;
2732 break; 2650 break;
2733 case WRITE_SAME_32: 2651 case WRITE_SAME_32:
2734 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); 2652 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
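Across the WRITE(10)/WRITE(12)/WRITE(16) and XDWRITEREAD cases above, the per-command t_tasks_fua field is replaced by an SCF_FUA flag derived from bit 3 of CDB byte 1. A standalone sketch of that bit test (SCF_FUA's value here is a placeholder, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the flag conversion above: for WRITE(10)/WRITE(12)/WRITE(16)
     * the FUA bit is bit 3 of CDB byte 1, and instead of a dedicated
     * t_tasks_fua field the command now just carries a flag bit. */
    #define SCF_FUA (1 << 0)    /* placeholder value */

    static unsigned int cdb_write_flags(const uint8_t *cdb)
    {
            unsigned int flags = 0;

            if (cdb[1] & 0x8)
                    flags |= SCF_FUA;
            return flags;
    }

    int main(void)
    {
            uint8_t write10_fua[10] = { 0x2a, 0x08 };       /* WRITE(10), FUA set */

            printf("FUA %s\n", (cdb_write_flags(write10_fua) & SCF_FUA) ? "set" : "clear");
            return 0;
    }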
@@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
3171 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 3089 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3172 cmd->se_ordered_id); 3090 cmd->se_ordered_id);
3173 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { 3091 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3174 atomic_dec(&dev->dev_hoq_count);
3175 smp_mb__after_atomic_dec();
3176 dev->dev_cur_ordered_id++; 3092 dev->dev_cur_ordered_id++;
3177 pr_debug("Incremented dev_cur_ordered_id: %u for" 3093 pr_debug("Incremented dev_cur_ordered_id: %u for"
3178 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 3094 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3179 cmd->se_ordered_id); 3095 cmd->se_ordered_id);
3180 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 3096 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3181 spin_lock(&dev->ordered_cmd_lock);
3182 list_del(&cmd->se_ordered_node);
3183 atomic_dec(&dev->dev_ordered_sync); 3097 atomic_dec(&dev->dev_ordered_sync);
3184 smp_mb__after_atomic_dec(); 3098 smp_mb__after_atomic_dec();
3185 spin_unlock(&dev->ordered_cmd_lock);
3186 3099
3187 dev->dev_cur_ordered_id++; 3100 dev->dev_cur_ordered_id++;
3188 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 3101 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
3495 3408
3496 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || 3409 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3497 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { 3410 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
3411 /*
3412 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
3413 * scatterlists already have been set to follow what the fabric
3414 * passes for the original expected data transfer length.
3415 */
3416 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
3417 pr_warn("Rejecting SCSI DATA overflow for fabric using"
3418 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
3419 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3420 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3421 return -EINVAL;
3422 }
3498 3423
3499 cmd->t_data_sg = sgl; 3424 cmd->t_data_sg = sgl;
3500 cmd->t_data_nents = sgl_count; 3425 cmd->t_data_nents = sgl_count;
@@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
3813 cmd->data_length) { 3738 cmd->data_length) {
3814 ret = transport_generic_get_mem(cmd); 3739 ret = transport_generic_get_mem(cmd);
3815 if (ret < 0) 3740 if (ret < 0)
3816 return ret; 3741 goto out_fail;
3817 } 3742 }
3818 3743
3819 /* 3744 /*
@@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
3842 task_cdbs = transport_allocate_control_task(cmd); 3767 task_cdbs = transport_allocate_control_task(cmd);
3843 } 3768 }
3844 3769
3845 if (task_cdbs <= 0) 3770 if (task_cdbs < 0)
3846 goto out_fail; 3771 goto out_fail;
3772 else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
3773 cmd->t_state = TRANSPORT_COMPLETE;
3774 atomic_set(&cmd->t_transport_active, 1);
3775 INIT_WORK(&cmd->work, target_complete_ok_work);
3776 queue_work(target_completion_wq, &cmd->work);
3777 return 0;
3778 }
3847 3779
3848 if (set_counts) { 3780 if (set_counts) {
3849 atomic_inc(&cmd->t_fe_count); 3781 atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
3929 else if (ret < 0) 3861 else if (ret < 0)
3930 return ret; 3862 return ret;
3931 3863
3932 return PYX_TRANSPORT_WRITE_PENDING; 3864 return 1;
3933 3865
3934queue_full: 3866queue_full:
3935 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 3867 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
4602 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 4534 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
4603 atomic_inc(&cmd->t_transport_aborted); 4535 atomic_inc(&cmd->t_transport_aborted);
4604 smp_mb__after_atomic_inc(); 4536 smp_mb__after_atomic_inc();
4605 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4606 transport_new_cmd_failure(cmd);
4607 return;
4608 } 4537 }
4609 } 4538 }
4610 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 4539 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
4670 struct se_cmd *cmd; 4599 struct se_cmd *cmd;
4671 struct se_device *dev = (struct se_device *) param; 4600 struct se_device *dev = (struct se_device *) param;
4672 4601
4673 set_user_nice(current, -20);
4674
4675 while (!kthread_should_stop()) { 4602 while (!kthread_should_stop()) {
4676 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, 4603 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
4677 atomic_read(&dev->dev_queue_obj.queue_cnt) || 4604 atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@@ -4698,18 +4625,13 @@ get_cmd:
4698 } 4625 }
4699 ret = cmd->se_tfo->new_cmd_map(cmd); 4626 ret = cmd->se_tfo->new_cmd_map(cmd);
4700 if (ret < 0) { 4627 if (ret < 0) {
4701 cmd->transport_error_status = ret; 4628 transport_generic_request_failure(cmd);
4702 transport_generic_request_failure(cmd,
4703 0, (cmd->data_direction !=
4704 DMA_TO_DEVICE));
4705 break; 4629 break;
4706 } 4630 }
4707 ret = transport_generic_new_cmd(cmd); 4631 ret = transport_generic_new_cmd(cmd);
4708 if (ret < 0) { 4632 if (ret < 0) {
4709 cmd->transport_error_status = ret; 4633 transport_generic_request_failure(cmd);
4710 transport_generic_request_failure(cmd, 4634 break;
4711 0, (cmd->data_direction !=
4712 DMA_TO_DEVICE));
4713 } 4635 }
4714 break; 4636 break;
4715 case TRANSPORT_PROCESS_WRITE: 4637 case TRANSPORT_PROCESS_WRITE:
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 4fac37c4c615..71fc9cea5dc9 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
200 lport = ep->lp; 200 lport = ep->lp;
201 fp = fc_frame_alloc(lport, sizeof(*txrdy)); 201 fp = fc_frame_alloc(lport, sizeof(*txrdy));
202 if (!fp) 202 if (!fp)
203 return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 203 return -ENOMEM; /* Signal QUEUE_FULL */
204 204
205 txrdy = fc_frame_payload_get(fp, sizeof(*txrdy)); 205 txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
206 memset(txrdy, 0, sizeof(*txrdy)); 206 memset(txrdy, 0, sizeof(*txrdy));
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 5f770412ca40..9402b7387cac 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
436 struct ft_lport_acl *lacl = container_of(wwn, 436 struct ft_lport_acl *lacl = container_of(wwn,
437 struct ft_lport_acl, fc_lport_wwn); 437 struct ft_lport_acl, fc_lport_wwn);
438 438
439 pr_debug("del lport %s\n", 439 pr_debug("del lport %s\n", lacl->name);
440 config_item_name(&wwn->wwn_group.cg_item));
441 mutex_lock(&ft_lport_lock); 440 mutex_lock(&ft_lport_lock);
442 list_del(&lacl->list); 441 list_del(&lacl->list);
443 mutex_unlock(&ft_lport_lock); 442 mutex_unlock(&ft_lport_lock);
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 435f6facbc23..44fbebab5075 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -46,6 +46,7 @@ static inline char __dcc_getchar(void)
46 46
47 asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg" 47 asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
48 : "=r" (__c)); 48 : "=r" (__c));
49 isb();
49 50
50 return __c; 51 return __c;
51} 52}
@@ -55,6 +56,7 @@ static inline void __dcc_putchar(char c)
55 asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char" 56 asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
56 : /* no output register */ 57 : /* no output register */
57 : "r" (c)); 58 : "r" (c));
59 isb();
58} 60}
59 61
60static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count) 62static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 4cb0d0a3e57b..fc7bbba585ce 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -66,14 +66,16 @@
66static int debug; 66static int debug;
67module_param(debug, int, 0600); 67module_param(debug, int, 0600);
68 68
69#define T1 (HZ/10) 69/* Defaults: these are from the specification */
70#define T2 (HZ/3) 70
71#define N2 3 71#define T1 10 /* 100mS */
72#define T2 34 /* 333mS */
73#define N2 3 /* Retry 3 times */
72 74
73/* Use long timers for testing at low speed with debug on */ 75/* Use long timers for testing at low speed with debug on */
74#ifdef DEBUG_TIMING 76#ifdef DEBUG_TIMING
75#define T1 HZ 77#define T1 100
76#define T2 (2 * HZ) 78#define T2 200
77#endif 79#endif
78 80
79/* 81/*
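The timer constants above change units: T1 and T2 are now kept in hundredths of a second, matching the spec defaults noted in the comment, rather than precomputed jiffies. A standalone sketch of the scaling the timer code is then assumed to apply (HZ is a stand-in value, not taken from a real kernel config):

    #include <stdio.h>

    /* Sketch only: with T1/T2 stored in hundredths of a second, arming a
     * timer is assumed to scale by HZ/100 rather than using the value
     * directly as jiffies. */
    #define HZ 250

    static unsigned long hundredths_to_jiffies(unsigned int t)
    {
            return (unsigned long)t * HZ / 100;
    }

    int main(void)
    {
            printf("T1 = 10 -> %lu jiffies (100 ms)\n", hundredths_to_jiffies(10));
            printf("T2 = 34 -> %lu jiffies (~340 ms)\n", hundredths_to_jiffies(34));
            return 0;
    }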
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 5f479dada6f2..925a1e547a83 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1560,7 +1560,7 @@ config SERIAL_IFX6X60
1560 Support for the IFX6x60 modem devices on Intel MID platforms. 1560 Support for the IFX6x60 modem devices on Intel MID platforms.
1561 1561
1562config SERIAL_PCH_UART 1562config SERIAL_PCH_UART
1563 tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) UART" 1563 tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART"
1564 depends on PCI 1564 depends on PCI
1565 select SERIAL_CORE 1565 select SERIAL_CORE
1566 help 1566 help
@@ -1568,12 +1568,12 @@ config SERIAL_PCH_UART
1568 which is an IOH(Input/Output Hub) for x86 embedded processor. 1568 which is an IOH(Input/Output Hub) for x86 embedded processor.
1569 Enabling PCH_DMA, this PCH UART works as DMA mode. 1569 Enabling PCH_DMA, this PCH UART works as DMA mode.
1570 1570
1571 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 1571 This driver also can be used for LAPIS Semiconductor IOH(Input/
1572 Output Hub), ML7213 and ML7223. 1572 Output Hub), ML7213, ML7223 and ML7831.
1573 ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is 1573 ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
1574 for MP(Media Phone) use. 1574 for MP(Media Phone) use and ML7831 IOH is for general purpose use.
1575 ML7213/ML7223 is companion chip for Intel Atom E6xx series. 1575 ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
1576 ML7213/ML7223 is completely compatible for Intel EG20T PCH. 1576 ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
1577 1577
1578config SERIAL_MSM_SMD 1578config SERIAL_MSM_SMD
1579 bool "Enable tty device interface for some SMD ports" 1579 bool "Enable tty device interface for some SMD ports"
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 4a0f86fa1e90..4c823f341d98 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -228,7 +228,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
228 if (rs485conf->flags & SER_RS485_ENABLED) { 228 if (rs485conf->flags & SER_RS485_ENABLED) {
229 dev_dbg(port->dev, "Setting UART to RS485\n"); 229 dev_dbg(port->dev, "Setting UART to RS485\n");
230 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 230 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
231 if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) 231 if ((rs485conf->delay_rts_after_send) > 0)
232 UART_PUT_TTGR(port, rs485conf->delay_rts_after_send); 232 UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
233 mode |= ATMEL_US_USMODE_RS485; 233 mode |= ATMEL_US_USMODE_RS485;
234 } else { 234 } else {
@@ -304,7 +304,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
304 304
305 if (atmel_port->rs485.flags & SER_RS485_ENABLED) { 305 if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
306 dev_dbg(port->dev, "Setting UART to RS485\n"); 306 dev_dbg(port->dev, "Setting UART to RS485\n");
307 if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND) 307 if ((atmel_port->rs485.delay_rts_after_send) > 0)
308 UART_PUT_TTGR(port, 308 UART_PUT_TTGR(port,
309 atmel_port->rs485.delay_rts_after_send); 309 atmel_port->rs485.delay_rts_after_send);
310 mode |= ATMEL_US_USMODE_RS485; 310 mode |= ATMEL_US_USMODE_RS485;
@@ -1228,7 +1228,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
1228 1228
1229 if (atmel_port->rs485.flags & SER_RS485_ENABLED) { 1229 if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
1230 dev_dbg(port->dev, "Setting UART to RS485\n"); 1230 dev_dbg(port->dev, "Setting UART to RS485\n");
1231 if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND) 1231 if ((atmel_port->rs485.delay_rts_after_send) > 0)
1232 UART_PUT_TTGR(port, 1232 UART_PUT_TTGR(port,
1233 atmel_port->rs485.delay_rts_after_send); 1233 atmel_port->rs485.delay_rts_after_send);
1234 mode |= ATMEL_US_USMODE_RS485; 1234 mode |= ATMEL_US_USMODE_RS485;
@@ -1447,16 +1447,6 @@ static void __devinit atmel_of_init_port(struct atmel_uart_port *atmel_port,
1447 rs485conf->delay_rts_after_send = rs485_delay[1]; 1447 rs485conf->delay_rts_after_send = rs485_delay[1];
1448 rs485conf->flags = 0; 1448 rs485conf->flags = 0;
1449 1449
1450 if (rs485conf->delay_rts_before_send == 0 &&
1451 rs485conf->delay_rts_after_send == 0) {
1452 rs485conf->flags |= SER_RS485_RTS_ON_SEND;
1453 } else {
1454 if (rs485conf->delay_rts_before_send)
1455 rs485conf->flags |= SER_RS485_RTS_BEFORE_SEND;
1456 if (rs485conf->delay_rts_after_send)
1457 rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
1458 }
1459
1460 if (of_get_property(np, "rs485-rx-during-tx", NULL)) 1450 if (of_get_property(np, "rs485-rx-during-tx", NULL))
1461 rs485conf->flags |= SER_RS485_RX_DURING_TX; 1451 rs485conf->flags |= SER_RS485_RX_DURING_TX;
1462 1452
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index b7435043f2fe..1dfba7b779c8 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -3234,9 +3234,8 @@ rs_write(struct tty_struct *tty,
3234 e100_disable_rx(info); 3234 e100_disable_rx(info);
3235 e100_enable_rx_irq(info); 3235 e100_enable_rx_irq(info);
3236#endif 3236#endif
3237 if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) && 3237 if (info->rs485.delay_rts_before_send > 0)
3238 (info->rs485.delay_rts_before_send > 0)) 3238 msleep(info->rs485.delay_rts_before_send);
3239 msleep(info->rs485.delay_rts_before_send);
3240 } 3239 }
3241#endif /* CONFIG_ETRAX_RS485 */ 3240#endif /* CONFIG_ETRAX_RS485 */
3242 3241
@@ -3693,10 +3692,6 @@ rs_ioctl(struct tty_struct *tty,
3693 3692
3694 rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send; 3693 rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send;
3695 rs485data.flags = 0; 3694 rs485data.flags = 0;
3696 if (rs485data.delay_rts_before_send != 0)
3697 rs485data.flags |= SER_RS485_RTS_BEFORE_SEND;
3698 else
3699 rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
3700 3695
3701 if (rs485ctrl.enabled) 3696 if (rs485ctrl.enabled)
3702 rs485data.flags |= SER_RS485_ENABLED; 3697 rs485data.flags |= SER_RS485_ENABLED;
@@ -4531,7 +4526,6 @@ static int __init rs_init(void)
4531 /* Set sane defaults */ 4526 /* Set sane defaults */
4532 info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND); 4527 info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
4533 info->rs485.flags |= SER_RS485_RTS_AFTER_SEND; 4528 info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
4534 info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
4535 info->rs485.delay_rts_before_send = 0; 4529 info->rs485.delay_rts_before_send = 0;
4536 info->rs485.flags &= ~(SER_RS485_ENABLED); 4530 info->rs485.flags &= ~(SER_RS485_ENABLED);
4537#endif 4531#endif
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index 286c386d9c46..e272d3919c67 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -884,7 +884,6 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
884{ 884{
885 struct uart_hsu_port *up = 885 struct uart_hsu_port *up =
886 container_of(port, struct uart_hsu_port, port); 886 container_of(port, struct uart_hsu_port, port);
887 struct tty_struct *tty = port->state->port.tty;
888 unsigned char cval, fcr = 0; 887 unsigned char cval, fcr = 0;
889 unsigned long flags; 888 unsigned long flags;
890 unsigned int baud, quot; 889 unsigned int baud, quot;
@@ -907,8 +906,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
907 } 906 }
908 907
909 /* CMSPAR isn't supported by this driver */ 908 /* CMSPAR isn't supported by this driver */
910 if (tty) 909 termios->c_cflag &= ~CMSPAR;
911 tty->termios->c_cflag &= ~CMSPAR;
912 910
913 if (termios->c_cflag & CSTOPB) 911 if (termios->c_cflag & CSTOPB)
914 cval |= UART_LCR_STOP; 912 cval |= UART_LCR_STOP;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 21febef926aa..d6aba8c087e4 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1,5 +1,5 @@
1/* 1/*
2 *Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. 2 *Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 * 3 *
4 *This program is free software; you can redistribute it and/or modify 4 *This program is free software; you can redistribute it and/or modify
5 *it under the terms of the GNU General Public License as published by 5 *it under the terms of the GNU General Public License as published by
@@ -46,8 +46,8 @@ enum {
46 46
47/* Set the max number of UART port 47/* Set the max number of UART port
48 * Intel EG20T PCH: 4 port 48 * Intel EG20T PCH: 4 port
49 * OKI SEMICONDUCTOR ML7213 IOH: 3 port 49 * LAPIS Semiconductor ML7213 IOH: 3 port
50 * OKI SEMICONDUCTOR ML7223 IOH: 2 port 50 * LAPIS Semiconductor ML7223 IOH: 2 port
51*/ 51*/
52#define PCH_UART_NR 4 52#define PCH_UART_NR 4
53 53
@@ -258,6 +258,8 @@ enum pch_uart_num_t {
258 pch_ml7213_uart2, 258 pch_ml7213_uart2,
259 pch_ml7223_uart0, 259 pch_ml7223_uart0,
260 pch_ml7223_uart1, 260 pch_ml7223_uart1,
261 pch_ml7831_uart0,
262 pch_ml7831_uart1,
261}; 263};
262 264
263static struct pch_uart_driver_data drv_dat[] = { 265static struct pch_uart_driver_data drv_dat[] = {
@@ -270,6 +272,8 @@ static struct pch_uart_driver_data drv_dat[] = {
270 [pch_ml7213_uart2] = {PCH_UART_2LINE, 2}, 272 [pch_ml7213_uart2] = {PCH_UART_2LINE, 2},
271 [pch_ml7223_uart0] = {PCH_UART_8LINE, 0}, 273 [pch_ml7223_uart0] = {PCH_UART_8LINE, 0},
272 [pch_ml7223_uart1] = {PCH_UART_2LINE, 1}, 274 [pch_ml7223_uart1] = {PCH_UART_2LINE, 1},
275 [pch_ml7831_uart0] = {PCH_UART_8LINE, 0},
276 [pch_ml7831_uart1] = {PCH_UART_2LINE, 1},
273}; 277};
274 278
275static unsigned int default_baud = 9600; 279static unsigned int default_baud = 9600;
@@ -628,6 +632,7 @@ static void pch_request_dma(struct uart_port *port)
628 dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n", 632 dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n",
629 __func__); 633 __func__);
630 dma_release_channel(priv->chan_tx); 634 dma_release_channel(priv->chan_tx);
635 priv->chan_tx = NULL;
631 return; 636 return;
632 } 637 }
633 638
@@ -1215,8 +1220,7 @@ static void pch_uart_shutdown(struct uart_port *port)
1215 dev_err(priv->port.dev, 1220 dev_err(priv->port.dev,
1216 "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret); 1221 "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret);
1217 1222
1218 if (priv->use_dma_flag) 1223 pch_free_dma(port);
1219 pch_free_dma(port);
1220 1224
1221 free_irq(priv->port.irq, priv); 1225 free_irq(priv->port.irq, priv);
1222} 1226}
@@ -1280,6 +1284,7 @@ static void pch_uart_set_termios(struct uart_port *port,
1280 if (rtn) 1284 if (rtn)
1281 goto out; 1285 goto out;
1282 1286
1287 pch_uart_set_mctrl(&priv->port, priv->port.mctrl);
1283 /* Don't rewrite B0 */ 1288 /* Don't rewrite B0 */
1284 if (tty_termios_baud_rate(termios)) 1289 if (tty_termios_baud_rate(termios))
1285 tty_termios_encode_baud_rate(termios, baud, baud); 1290 tty_termios_encode_baud_rate(termios, baud, baud);
@@ -1552,6 +1557,10 @@ static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = {
1552 .driver_data = pch_ml7223_uart0}, 1557 .driver_data = pch_ml7223_uart0},
1553 {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D), 1558 {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D),
1554 .driver_data = pch_ml7223_uart1}, 1559 .driver_data = pch_ml7223_uart1},
1560 {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8811),
1561 .driver_data = pch_ml7831_uart0},
1562 {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8812),
1563 .driver_data = pch_ml7831_uart1},
1555 {0,}, 1564 {0,},
1556}; 1565};
1557 1566
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 1945c70539c2..aff9d612dff0 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -207,6 +207,25 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
207 }, 207 },
208 208
209 /* 209 /*
210 * Common SH-2(A) SCIF definitions for ports with FIFO data
211 * count registers.
212 */
213 [SCIx_SH2_SCIF_FIFODATA_REGTYPE] = {
214 [SCSMR] = { 0x00, 16 },
215 [SCBRR] = { 0x04, 8 },
216 [SCSCR] = { 0x08, 16 },
217 [SCxTDR] = { 0x0c, 8 },
218 [SCxSR] = { 0x10, 16 },
219 [SCxRDR] = { 0x14, 8 },
220 [SCFCR] = { 0x18, 16 },
221 [SCFDR] = { 0x1c, 16 },
222 [SCTFDR] = sci_reg_invalid,
223 [SCRFDR] = sci_reg_invalid,
224 [SCSPTR] = { 0x20, 16 },
225 [SCLSR] = { 0x24, 16 },
226 },
227
228 /*
210 * Common SH-3 SCIF definitions. 229 * Common SH-3 SCIF definitions.
211 */ 230 */
212 [SCIx_SH3_SCIF_REGTYPE] = { 231 [SCIx_SH3_SCIF_REGTYPE] = {
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 512c49f98e85..8e0924f55446 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -36,6 +36,7 @@
36 36
37#include <linux/kmod.h> 37#include <linux/kmod.h>
38#include <linux/nsproxy.h> 38#include <linux/nsproxy.h>
39#include <linux/ratelimit.h>
39 40
40/* 41/*
41 * This guards the refcounted line discipline lists. The lock 42 * This guards the refcounted line discipline lists. The lock
@@ -547,15 +548,16 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
547/** 548/**
548 * tty_ldisc_wait_idle - wait for the ldisc to become idle 549 * tty_ldisc_wait_idle - wait for the ldisc to become idle
549 * @tty: tty to wait for 550 * @tty: tty to wait for
551 * @timeout: for how long to wait at most
550 * 552 *
551 * Wait for the line discipline to become idle. The discipline must 553 * Wait for the line discipline to become idle. The discipline must
552 * have been halted for this to guarantee it remains idle. 554 * have been halted for this to guarantee it remains idle.
553 */ 555 */
554static int tty_ldisc_wait_idle(struct tty_struct *tty) 556static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout)
555{ 557{
556 int ret; 558 long ret;
557 ret = wait_event_timeout(tty_ldisc_idle, 559 ret = wait_event_timeout(tty_ldisc_idle,
558 atomic_read(&tty->ldisc->users) == 1, 5 * HZ); 560 atomic_read(&tty->ldisc->users) == 1, timeout);
559 if (ret < 0) 561 if (ret < 0)
560 return ret; 562 return ret;
561 return ret > 0 ? 0 : -EBUSY; 563 return ret > 0 ? 0 : -EBUSY;
@@ -665,7 +667,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
665 667
666 tty_ldisc_flush_works(tty); 668 tty_ldisc_flush_works(tty);
667 669
668 retval = tty_ldisc_wait_idle(tty); 670 retval = tty_ldisc_wait_idle(tty, 5 * HZ);
669 671
670 tty_lock(); 672 tty_lock();
671 mutex_lock(&tty->ldisc_mutex); 673 mutex_lock(&tty->ldisc_mutex);
@@ -762,8 +764,6 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
762 if (IS_ERR(ld)) 764 if (IS_ERR(ld))
763 return -1; 765 return -1;
764 766
765 WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
766
767 tty_ldisc_close(tty, tty->ldisc); 767 tty_ldisc_close(tty, tty->ldisc);
768 tty_ldisc_put(tty->ldisc); 768 tty_ldisc_put(tty->ldisc);
769 tty->ldisc = NULL; 769 tty->ldisc = NULL;
@@ -838,7 +838,7 @@ void tty_ldisc_hangup(struct tty_struct *tty)
838 tty_unlock(); 838 tty_unlock();
839 cancel_work_sync(&tty->buf.work); 839 cancel_work_sync(&tty->buf.work);
840 mutex_unlock(&tty->ldisc_mutex); 840 mutex_unlock(&tty->ldisc_mutex);
841 841retry:
842 tty_lock(); 842 tty_lock();
843 mutex_lock(&tty->ldisc_mutex); 843 mutex_lock(&tty->ldisc_mutex);
844 844
@@ -847,6 +847,22 @@ void tty_ldisc_hangup(struct tty_struct *tty)
847 it means auditing a lot of other paths so this is 847 it means auditing a lot of other paths so this is
848 a FIXME */ 848 a FIXME */
849 if (tty->ldisc) { /* Not yet closed */ 849 if (tty->ldisc) { /* Not yet closed */
850 if (atomic_read(&tty->ldisc->users) != 1) {
851 char cur_n[TASK_COMM_LEN], tty_n[64];
852 long timeout = 3 * HZ;
853 tty_unlock();
854
855 while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
856 timeout = MAX_SCHEDULE_TIMEOUT;
857 printk_ratelimited(KERN_WARNING
858 "%s: waiting (%s) for %s took too long, but we keep waiting...\n",
859 __func__, get_task_comm(cur_n, current),
860 tty_name(tty, tty_n));
861 }
862 mutex_unlock(&tty->ldisc_mutex);
863 goto retry;
864 }
865
850 if (reset == 0) { 866 if (reset == 0) {
851 867
852 if (!tty_ldisc_reinit(tty, tty->termios->c_line)) 868 if (!tty_ldisc_reinit(tty, tty->termios->c_line))
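The loop added above keeps hangup from racing with a line discipline that still has users: wait a bounded 3*HZ first, then keep waiting indefinitely with a rate-limited warning until the user count drops to one, and only then retry the reinit. A standalone model of that control flow (plain ints stand in for wait_event_timeout(), -EBUSY and printk_ratelimited()):

    #include <stdio.h>

    /* Illustrative model only: one pretend "user" finishes per wait attempt,
     * and the loop switches from a bounded to an unbounded timeout after the
     * first miss, warning as it goes. */
    #define EBUSY 16

    static int ldisc_users = 3;

    static int wait_idle(long timeout)
    {
            (void)timeout;          /* a real wait would sleep up to this long */
            ldisc_users--;          /* pretend one user goes away per attempt */
            return ldisc_users == 1 ? 0 : -EBUSY;
    }

    int main(void)
    {
            long timeout = 3 * 100;                 /* "3 * HZ" in the driver */

            while (wait_idle(timeout) == -EBUSY) {
                    timeout = -1;                   /* MAX_SCHEDULE_TIMEOUT */
                    fprintf(stderr, "ldisc still busy, keep waiting...\n");
            }
            printf("ldisc idle, safe to reinit\n");
            return 0;
    }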
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 6960715c5063..a8078d0638fa 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -539,7 +539,6 @@ static void acm_port_down(struct acm *acm)
539{ 539{
540 int i; 540 int i;
541 541
542 mutex_lock(&open_mutex);
543 if (acm->dev) { 542 if (acm->dev) {
544 usb_autopm_get_interface(acm->control); 543 usb_autopm_get_interface(acm->control);
545 acm_set_control(acm, acm->ctrlout = 0); 544 acm_set_control(acm, acm->ctrlout = 0);
@@ -551,14 +550,15 @@ static void acm_port_down(struct acm *acm)
551 acm->control->needs_remote_wakeup = 0; 550 acm->control->needs_remote_wakeup = 0;
552 usb_autopm_put_interface(acm->control); 551 usb_autopm_put_interface(acm->control);
553 } 552 }
554 mutex_unlock(&open_mutex);
555} 553}
556 554
557static void acm_tty_hangup(struct tty_struct *tty) 555static void acm_tty_hangup(struct tty_struct *tty)
558{ 556{
559 struct acm *acm = tty->driver_data; 557 struct acm *acm = tty->driver_data;
560 tty_port_hangup(&acm->port); 558 tty_port_hangup(&acm->port);
559 mutex_lock(&open_mutex);
561 acm_port_down(acm); 560 acm_port_down(acm);
561 mutex_unlock(&open_mutex);
562} 562}
563 563
564static void acm_tty_close(struct tty_struct *tty, struct file *filp) 564static void acm_tty_close(struct tty_struct *tty, struct file *filp)
@@ -569,8 +569,9 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
569 shutdown */ 569 shutdown */
570 if (!acm) 570 if (!acm)
571 return; 571 return;
572
573 mutex_lock(&open_mutex);
572 if (tty_port_close_start(&acm->port, tty, filp) == 0) { 574 if (tty_port_close_start(&acm->port, tty, filp) == 0) {
573 mutex_lock(&open_mutex);
574 if (!acm->dev) { 575 if (!acm->dev) {
575 tty_port_tty_set(&acm->port, NULL); 576 tty_port_tty_set(&acm->port, NULL);
576 acm_tty_unregister(acm); 577 acm_tty_unregister(acm);
@@ -582,6 +583,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
582 acm_port_down(acm); 583 acm_port_down(acm);
583 tty_port_close_end(&acm->port, tty); 584 tty_port_close_end(&acm->port, tty);
584 tty_port_tty_set(&acm->port, NULL); 585 tty_port_tty_set(&acm->port, NULL);
586 mutex_unlock(&open_mutex);
585} 587}
586 588
587static int acm_tty_write(struct tty_struct *tty, 589static int acm_tty_write(struct tty_struct *tty,
@@ -1456,6 +1458,16 @@ static const struct usb_device_id acm_ids[] = {
1456 }, 1458 },
1457 { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */ 1459 { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
1458 }, 1460 },
1461 /* Motorola H24 HSPA module: */
1462 { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
1463 { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
1464 { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
1465 { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
1466 { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
1467 { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
1468 { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
1469 { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
1470
1459 { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */ 1471 { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
1460 .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on 1472 .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
1461 data interface instead of 1473 data interface instead of
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 96f05b29c9ad..79781461eec9 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -813,6 +813,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
813 USB_PORT_FEAT_C_PORT_LINK_STATE); 813 USB_PORT_FEAT_C_PORT_LINK_STATE);
814 } 814 }
815 815
816 if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
817 hub_is_superspeed(hub->hdev)) {
818 need_debounce_delay = true;
819 clear_port_feature(hub->hdev, port1,
820 USB_PORT_FEAT_C_BH_PORT_RESET);
821 }
816 /* We can forget about a "removed" device when there's a 822 /* We can forget about a "removed" device when there's a
817 * physical disconnect or the connect status changes. 823 * physical disconnect or the connect status changes.
818 */ 824 */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d6a8d8269bfb..ecf12e15a7ef 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -50,15 +50,42 @@ static const struct usb_device_id usb_quirk_list[] = {
50 /* Logitech Webcam B/C500 */ 50 /* Logitech Webcam B/C500 */
51 { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME }, 51 { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME },
52 52
53 /* Logitech Webcam C600 */
54 { USB_DEVICE(0x046d, 0x0808), .driver_info = USB_QUIRK_RESET_RESUME },
55
53 /* Logitech Webcam Pro 9000 */ 56 /* Logitech Webcam Pro 9000 */
54 { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME }, 57 { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME },
55 58
59 /* Logitech Webcam C905 */
60 { USB_DEVICE(0x046d, 0x080a), .driver_info = USB_QUIRK_RESET_RESUME },
61
62 /* Logitech Webcam C210 */
63 { USB_DEVICE(0x046d, 0x0819), .driver_info = USB_QUIRK_RESET_RESUME },
64
65 /* Logitech Webcam C260 */
66 { USB_DEVICE(0x046d, 0x081a), .driver_info = USB_QUIRK_RESET_RESUME },
67
56 /* Logitech Webcam C310 */ 68 /* Logitech Webcam C310 */
57 { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME }, 69 { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME },
58 70
71 /* Logitech Webcam C910 */
72 { USB_DEVICE(0x046d, 0x0821), .driver_info = USB_QUIRK_RESET_RESUME },
73
74 /* Logitech Webcam C160 */
75 { USB_DEVICE(0x046d, 0x0824), .driver_info = USB_QUIRK_RESET_RESUME },
76
59 /* Logitech Webcam C270 */ 77 /* Logitech Webcam C270 */
60 { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, 78 { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
61 79
80 /* Logitech Quickcam Pro 9000 */
81 { USB_DEVICE(0x046d, 0x0990), .driver_info = USB_QUIRK_RESET_RESUME },
82
83 /* Logitech Quickcam E3500 */
84 { USB_DEVICE(0x046d, 0x09a4), .driver_info = USB_QUIRK_RESET_RESUME },
85
86 /* Logitech Quickcam Vision Pro */
87 { USB_DEVICE(0x046d, 0x09a6), .driver_info = USB_QUIRK_RESET_RESUME },
88
62 /* Logitech Harmony 700-series */ 89 /* Logitech Harmony 700-series */
63 { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT }, 90 { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
64 91
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index fa824cfdd2eb..25dbd8614e72 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1284,6 +1284,7 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1284 int ret; 1284 int ret;
1285 1285
1286 dep->endpoint.maxpacket = 1024; 1286 dep->endpoint.maxpacket = 1024;
1287 dep->endpoint.max_streams = 15;
1287 dep->endpoint.ops = &dwc3_gadget_ep_ops; 1288 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1288 list_add_tail(&dep->endpoint.ep_list, 1289 list_add_tail(&dep->endpoint.ep_list,
1289 &dwc->gadget.ep_list); 1290 &dwc->gadget.ep_list);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b21cd376c11a..23a447373c51 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -469,7 +469,7 @@ config USB_LANGWELL
469 gadget drivers to also be dynamically linked. 469 gadget drivers to also be dynamically linked.
470 470
471config USB_EG20T 471config USB_EG20T
472 tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC" 472 tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
473 depends on PCI 473 depends on PCI
474 select USB_GADGET_DUALSPEED 474 select USB_GADGET_DUALSPEED
475 help 475 help
@@ -485,10 +485,11 @@ config USB_EG20T
485 This driver does not support interrupt transfer or isochronous 485 This driver does not support interrupt transfer or isochronous
486 transfer modes. 486 transfer modes.
487 487
488 This driver also can be used for OKI SEMICONDUCTOR's ML7213 which is 488 This driver also can be used for LAPIS Semiconductor's ML7213 which is
489 for IVI(In-Vehicle Infotainment) use. 489 for IVI(In-Vehicle Infotainment) use.
490 ML7213 is companion chip for Intel Atom E6xx series. 490 ML7831 is for general purpose use.
491 ML7213 is completely compatible for Intel EG20T PCH. 491 ML7213/ML7831 is companion chip for Intel Atom E6xx series.
492 ML7213/ML7831 is completely compatible for Intel EG20T PCH.
492 493
493config USB_CI13XXX_MSM 494config USB_CI13XXX_MSM
494 tristate "MIPS USB CI13xxx for MSM" 495 tristate "MIPS USB CI13xxx for MSM"
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 4730016d7cd4..45f422ac103f 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver,
1959 u32 tmp; 1959 u32 tmp;
1960 1960
1961 if (!driver || !bind || !driver->setup 1961 if (!driver || !bind || !driver->setup
1962 || driver->speed != USB_SPEED_HIGH) 1962 || driver->speed < USB_SPEED_HIGH)
1963 return -EINVAL; 1963 return -EINVAL;
1964 if (!dev) 1964 if (!dev)
1965 return -ENODEV; 1965 return -ENODEV;
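The one-character change above relaxes the gadget-driver speed check from "exactly high speed" to "at least high speed", so dual/super-speed capable drivers are no longer rejected. A standalone sketch; the enum ordering below is assumed to mirror include/linux/usb/ch9.h and is not part of this diff:

    #include <stdio.h>

    /* Assumed ordering: UNKNOWN < LOW < FULL < HIGH < WIRELESS < SUPER. */
    enum usb_device_speed {
            USB_SPEED_UNKNOWN = 0,
            USB_SPEED_LOW,
            USB_SPEED_FULL,
            USB_SPEED_HIGH,
            USB_SPEED_WIRELESS,
            USB_SPEED_SUPER,
    };

    static int driver_speed_ok(enum usb_device_speed speed)
    {
            return !(speed < USB_SPEED_HIGH);       /* the relaxed check */
    }

    int main(void)
    {
            printf("full-speed driver accepted:  %d\n", driver_speed_ok(USB_SPEED_FULL));
            printf("high-speed driver accepted:  %d\n", driver_speed_ok(USB_SPEED_HIGH));
            printf("super-speed driver accepted: %d\n", driver_speed_ok(USB_SPEED_SUPER));
            return 0;
    }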
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 4eedfe557154..1fc612914c52 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -122,3 +122,5 @@ static int __init ci13xxx_msm_init(void)
122 return platform_driver_register(&ci13xxx_msm_driver); 122 return platform_driver_register(&ci13xxx_msm_driver);
123} 123}
124module_init(ci13xxx_msm_init); 124module_init(ci13xxx_msm_init);
125
126MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 83428f56253b..9a0c3979ff43 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -71,6 +71,9 @@
71/****************************************************************************** 71/******************************************************************************
72 * DEFINE 72 * DEFINE
73 *****************************************************************************/ 73 *****************************************************************************/
74
75#define DMA_ADDR_INVALID (~(dma_addr_t)0)
76
74/* ctrl register bank access */ 77/* ctrl register bank access */
75static DEFINE_SPINLOCK(udc_lock); 78static DEFINE_SPINLOCK(udc_lock);
76 79
@@ -1434,7 +1437,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
1434 return -EALREADY; 1437 return -EALREADY;
1435 1438
1436 mReq->req.status = -EALREADY; 1439 mReq->req.status = -EALREADY;
1437 if (length && !mReq->req.dma) { 1440 if (length && mReq->req.dma == DMA_ADDR_INVALID) {
1438 mReq->req.dma = \ 1441 mReq->req.dma = \
1439 dma_map_single(mEp->device, mReq->req.buf, 1442 dma_map_single(mEp->device, mReq->req.buf,
1440 length, mEp->dir ? DMA_TO_DEVICE : 1443 length, mEp->dir ? DMA_TO_DEVICE :
@@ -1453,7 +1456,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
1453 dma_unmap_single(mEp->device, mReq->req.dma, 1456 dma_unmap_single(mEp->device, mReq->req.dma,
1454 length, mEp->dir ? DMA_TO_DEVICE : 1457 length, mEp->dir ? DMA_TO_DEVICE :
1455 DMA_FROM_DEVICE); 1458 DMA_FROM_DEVICE);
1456 mReq->req.dma = 0; 1459 mReq->req.dma = DMA_ADDR_INVALID;
1457 mReq->map = 0; 1460 mReq->map = 0;
1458 } 1461 }
1459 return -ENOMEM; 1462 return -ENOMEM;
@@ -1549,7 +1552,7 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
1549 if (mReq->map) { 1552 if (mReq->map) {
1550 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, 1553 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
1551 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1554 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1552 mReq->req.dma = 0; 1555 mReq->req.dma = DMA_ADDR_INVALID;
1553 mReq->map = 0; 1556 mReq->map = 0;
1554 } 1557 }
1555 1558
@@ -1610,7 +1613,6 @@ __acquires(mEp->lock)
1610 * @gadget: gadget 1613 * @gadget: gadget
1611 * 1614 *
1612 * This function returns an error code 1615 * This function returns an error code
1613 * Caller must hold lock
1614 */ 1616 */
1615static int _gadget_stop_activity(struct usb_gadget *gadget) 1617static int _gadget_stop_activity(struct usb_gadget *gadget)
1616{ 1618{
@@ -2189,6 +2191,7 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
2189 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags); 2191 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
2190 if (mReq != NULL) { 2192 if (mReq != NULL) {
2191 INIT_LIST_HEAD(&mReq->queue); 2193 INIT_LIST_HEAD(&mReq->queue);
2194 mReq->req.dma = DMA_ADDR_INVALID;
2192 2195
2193 mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags, 2196 mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
2194 &mReq->dma); 2197 &mReq->dma);
@@ -2328,7 +2331,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2328 if (mReq->map) { 2331 if (mReq->map) {
2329 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, 2332 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
2330 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 2333 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
2331 mReq->req.dma = 0; 2334 mReq->req.dma = DMA_ADDR_INVALID;
2332 mReq->map = 0; 2335 mReq->map = 0;
2333 } 2336 }
2334 req->status = -ECONNRESET; 2337 req->status = -ECONNRESET;
@@ -2500,12 +2503,12 @@ static int ci13xxx_wakeup(struct usb_gadget *_gadget)
2500 spin_lock_irqsave(udc->lock, flags); 2503 spin_lock_irqsave(udc->lock, flags);
2501 if (!udc->remote_wakeup) { 2504 if (!udc->remote_wakeup) {
2502 ret = -EOPNOTSUPP; 2505 ret = -EOPNOTSUPP;
2503 dbg_trace("remote wakeup feature is not enabled\n"); 2506 trace("remote wakeup feature is not enabled\n");
2504 goto out; 2507 goto out;
2505 } 2508 }
2506 if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) { 2509 if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
2507 ret = -EINVAL; 2510 ret = -EINVAL;
2508 dbg_trace("port is not suspended\n"); 2511 trace("port is not suspended\n");
2509 goto out; 2512 goto out;
2510 } 2513 }
2511 hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR); 2514 hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
@@ -2703,7 +2706,9 @@ static int ci13xxx_stop(struct usb_gadget_driver *driver)
2703 if (udc->udc_driver->notify_event) 2706 if (udc->udc_driver->notify_event)
2704 udc->udc_driver->notify_event(udc, 2707 udc->udc_driver->notify_event(udc,
2705 CI13XXX_CONTROLLER_STOPPED_EVENT); 2708 CI13XXX_CONTROLLER_STOPPED_EVENT);
2709 spin_unlock_irqrestore(udc->lock, flags);
2706 _gadget_stop_activity(&udc->gadget); 2710 _gadget_stop_activity(&udc->gadget);
2711 spin_lock_irqsave(udc->lock, flags);
2707 pm_runtime_put(&udc->gadget.dev); 2712 pm_runtime_put(&udc->gadget.dev);
2708 } 2713 }
2709 2714
@@ -2850,7 +2855,7 @@ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
2850 struct ci13xxx *udc; 2855 struct ci13xxx *udc;
2851 int retval = 0; 2856 int retval = 0;
2852 2857
2853 trace("%p, %p, %p", dev, regs, name); 2858 trace("%p, %p, %p", dev, regs, driver->name);
2854 2859
2855 if (dev == NULL || regs == NULL || driver == NULL || 2860 if (dev == NULL || regs == NULL || driver == NULL ||
2856 driver->name == NULL) 2861 driver->name == NULL)
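The ci13xxx_udc changes above replace 0 with an all-ones sentinel for "no DMA mapping", since 0 can be a valid bus address, and they drop the UDC spinlock around _gadget_stop_activity() so the gadget driver's disconnect callback is not invoked with the lock held. A self-contained sketch of the sentinel pattern; the my_* names and the DMA_TO_DEVICE direction are illustrative, not the driver's.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

#define MY_DMA_ADDR_INVALID	(~(dma_addr_t)0)

struct my_req {
	void		*buf;
	dma_addr_t	dma;		/* MY_DMA_ADDR_INVALID while unmapped */
};

/* Sketch: 0 can be a perfectly valid bus address, so "unmapped" is
 * tracked with an all-ones sentinel instead. */
static int my_req_map(struct device *dev, struct my_req *r, size_t len)
{
	if (r->dma != MY_DMA_ADDR_INVALID)
		return 0;				/* already mapped */
	r->dma = dma_map_single(dev, r->buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, r->dma)) {
		r->dma = MY_DMA_ADDR_INVALID;
		return -ENOMEM;
	}
	return 0;
}

static void my_req_unmap(struct device *dev, struct my_req *r, size_t len)
{
	if (r->dma == MY_DMA_ADDR_INVALID)
		return;
	dma_unmap_single(dev, r->dma, len, DMA_TO_DEVICE);
	r->dma = MY_DMA_ADDR_INVALID;		/* back to "unmapped" */
}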
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 52583a235330..1a6f415c0d02 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -624,7 +624,8 @@ static int fsg_setup(struct usb_function *f,
624 if (ctrl->bRequestType != 624 if (ctrl->bRequestType !=
625 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 625 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
626 break; 626 break;
627 if (w_index != fsg->interface_number || w_value != 0) 627 if (w_index != fsg->interface_number || w_value != 0 ||
628 w_length != 0)
628 return -EDOM; 629 return -EDOM;
629 630
630 /* 631 /*
@@ -639,7 +640,8 @@ static int fsg_setup(struct usb_function *f,
639 if (ctrl->bRequestType != 640 if (ctrl->bRequestType !=
640 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 641 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
641 break; 642 break;
642 if (w_index != fsg->interface_number || w_value != 0) 643 if (w_index != fsg->interface_number || w_value != 0 ||
644 w_length != 1)
643 return -EDOM; 645 return -EDOM;
644 VDBG(fsg, "get max LUN\n"); 646 VDBG(fsg, "get max LUN\n");
645 *(u8 *)req->buf = fsg->common->nluns - 1; 647 *(u8 *)req->buf = fsg->common->nluns - 1;
@@ -2973,6 +2975,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2973 fsg_common_put(common); 2975 fsg_common_put(common);
2974 usb_free_descriptors(fsg->function.descriptors); 2976 usb_free_descriptors(fsg->function.descriptors);
2975 usb_free_descriptors(fsg->function.hs_descriptors); 2977 usb_free_descriptors(fsg->function.hs_descriptors);
2978 usb_free_descriptors(fsg->function.ss_descriptors);
2976 kfree(fsg); 2979 kfree(fsg);
2977} 2980}
2978 2981
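The f_mass_storage change enforces the fixed wLength of the two Bulk-Only Transport class requests: Bulk-Only Mass Storage Reset carries no data stage (wLength must be 0) and Get Max LUN returns exactly one byte (wLength must be 1); the unbind path additionally frees the SuperSpeed descriptor set. A hedged sketch of that request validation as a standalone helper; check_get_max_lun() is hypothetical.

#include <linux/errno.h>
#include <linux/usb/ch9.h>

/* Sketch: Get Max LUN is an IN class request on the interface with
 * wValue == 0 and exactly one byte of data; anything else is rejected.
 * The caller still compares wIndex with its own interface number. */
static int check_get_max_lun(const struct usb_ctrlrequest *ctrl)
{
	if (ctrl->bRequestType !=
			(USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
		return -EINVAL;
	if (le16_to_cpu(ctrl->wValue) != 0 ||
	    le16_to_cpu(ctrl->wLength) != 1)
		return -EDOM;
	return 0;
}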
diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
index 67b222908cf9..3797b3d6c622 100644
--- a/drivers/usb/gadget/f_midi.c
+++ b/drivers/usb/gadget/f_midi.c
@@ -95,7 +95,6 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req);
95 95
96DECLARE_UAC_AC_HEADER_DESCRIPTOR(1); 96DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
97DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1); 97DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
98DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(16);
99DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16); 98DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
100 99
101/* B.3.1 Standard AC Interface Descriptor */ 100/* B.3.1 Standard AC Interface Descriptor */
@@ -140,26 +139,6 @@ static struct usb_ms_header_descriptor ms_header_desc __initdata = {
140 /* .wTotalLength = DYNAMIC */ 139 /* .wTotalLength = DYNAMIC */
141}; 140};
142 141
143/* B.4.3 Embedded MIDI IN Jack Descriptor */
144static struct usb_midi_in_jack_descriptor jack_in_emb_desc = {
145 .bLength = USB_DT_MIDI_IN_SIZE,
146 .bDescriptorType = USB_DT_CS_INTERFACE,
147 .bDescriptorSubtype = USB_MS_MIDI_IN_JACK,
148 .bJackType = USB_MS_EMBEDDED,
149 /* .bJackID = DYNAMIC */
150};
151
152/* B.4.4 Embedded MIDI OUT Jack Descriptor */
153static struct usb_midi_out_jack_descriptor_16 jack_out_emb_desc = {
154 /* .bLength = DYNAMIC */
155 .bDescriptorType = USB_DT_CS_INTERFACE,
156 .bDescriptorSubtype = USB_MS_MIDI_OUT_JACK,
157 .bJackType = USB_MS_EMBEDDED,
158 /* .bJackID = DYNAMIC */
159 /* .bNrInputPins = DYNAMIC */
160 /* .pins = DYNAMIC */
161};
162
163/* B.5.1 Standard Bulk OUT Endpoint Descriptor */ 142/* B.5.1 Standard Bulk OUT Endpoint Descriptor */
164static struct usb_endpoint_descriptor bulk_out_desc = { 143static struct usb_endpoint_descriptor bulk_out_desc = {
165 .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, 144 .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
@@ -758,9 +737,11 @@ fail:
758static int __init 737static int __init
759f_midi_bind(struct usb_configuration *c, struct usb_function *f) 738f_midi_bind(struct usb_configuration *c, struct usb_function *f)
760{ 739{
761 struct usb_descriptor_header *midi_function[(MAX_PORTS * 2) + 12]; 740 struct usb_descriptor_header **midi_function;
762 struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS]; 741 struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
742 struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS];
763 struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS]; 743 struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
744 struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc[MAX_PORTS];
764 struct usb_composite_dev *cdev = c->cdev; 745 struct usb_composite_dev *cdev = c->cdev;
765 struct f_midi *midi = func_to_midi(f); 746 struct f_midi *midi = func_to_midi(f);
766 int status, n, jack = 1, i = 0; 747 int status, n, jack = 1, i = 0;
@@ -798,6 +779,14 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
798 goto fail; 779 goto fail;
799 midi->out_ep->driver_data = cdev; /* claim */ 780 midi->out_ep->driver_data = cdev; /* claim */
800 781
782 /* allocate temporary function list */
783 midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(midi_function),
784 GFP_KERNEL);
785 if (!midi_function) {
786 status = -ENOMEM;
787 goto fail;
788 }
789
801 /* 790 /*
802 * construct the function's descriptor set. As the number of 791 * construct the function's descriptor set. As the number of
803 * input and output MIDI ports is configurable, we have to do 792 * input and output MIDI ports is configurable, we have to do
@@ -811,73 +800,74 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
811 800
812 /* calculate the header's wTotalLength */ 801 /* calculate the header's wTotalLength */
813 n = USB_DT_MS_HEADER_SIZE 802 n = USB_DT_MS_HEADER_SIZE
814 + (1 + midi->in_ports) * USB_DT_MIDI_IN_SIZE 803 + (midi->in_ports + midi->out_ports) *
815 + (1 + midi->out_ports) * USB_DT_MIDI_OUT_SIZE(1); 804 (USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1));
816 ms_header_desc.wTotalLength = cpu_to_le16(n); 805 ms_header_desc.wTotalLength = cpu_to_le16(n);
817 806
818 midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc; 807 midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
819 808
820 /* we have one embedded IN jack */ 809 /* configure the external IN jacks, each linked to an embedded OUT jack */
821 jack_in_emb_desc.bJackID = jack++;
822 midi_function[i++] = (struct usb_descriptor_header *) &jack_in_emb_desc;
823
824 /* and a dynamic amount of external IN jacks */
825 for (n = 0; n < midi->in_ports; n++) {
826 struct usb_midi_in_jack_descriptor *ext = &jack_in_ext_desc[n];
827
828 ext->bLength = USB_DT_MIDI_IN_SIZE;
829 ext->bDescriptorType = USB_DT_CS_INTERFACE;
830 ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
831 ext->bJackType = USB_MS_EXTERNAL;
832 ext->bJackID = jack++;
833 ext->iJack = 0;
834
835 midi_function[i++] = (struct usb_descriptor_header *) ext;
836 }
837
838 /* one embedded OUT jack ... */
839 jack_out_emb_desc.bLength = USB_DT_MIDI_OUT_SIZE(midi->in_ports);
840 jack_out_emb_desc.bJackID = jack++;
841 jack_out_emb_desc.bNrInputPins = midi->in_ports;
842 /* ... which references all external IN jacks */
843 for (n = 0; n < midi->in_ports; n++) { 810 for (n = 0; n < midi->in_ports; n++) {
844 jack_out_emb_desc.pins[n].baSourceID = jack_in_ext_desc[n].bJackID; 811 struct usb_midi_in_jack_descriptor *in_ext = &jack_in_ext_desc[n];
845 jack_out_emb_desc.pins[n].baSourcePin = 1; 812 struct usb_midi_out_jack_descriptor_1 *out_emb = &jack_out_emb_desc[n];
813
814 in_ext->bLength = USB_DT_MIDI_IN_SIZE;
815 in_ext->bDescriptorType = USB_DT_CS_INTERFACE;
816 in_ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
817 in_ext->bJackType = USB_MS_EXTERNAL;
818 in_ext->bJackID = jack++;
819 in_ext->iJack = 0;
820 midi_function[i++] = (struct usb_descriptor_header *) in_ext;
821
822 out_emb->bLength = USB_DT_MIDI_OUT_SIZE(1);
823 out_emb->bDescriptorType = USB_DT_CS_INTERFACE;
824 out_emb->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
825 out_emb->bJackType = USB_MS_EMBEDDED;
826 out_emb->bJackID = jack++;
827 out_emb->bNrInputPins = 1;
828 out_emb->pins[0].baSourcePin = 1;
829 out_emb->pins[0].baSourceID = in_ext->bJackID;
830 out_emb->iJack = 0;
831 midi_function[i++] = (struct usb_descriptor_header *) out_emb;
832
833 /* link it to the endpoint */
834 ms_in_desc.baAssocJackID[n] = out_emb->bJackID;
846 } 835 }
847 836
848 midi_function[i++] = (struct usb_descriptor_header *) &jack_out_emb_desc; 837 /* configure the external OUT jacks, each linked to an embedded IN jack */
849
850 /* and multiple external OUT jacks ... */
851 for (n = 0; n < midi->out_ports; n++) { 838 for (n = 0; n < midi->out_ports; n++) {
852 struct usb_midi_out_jack_descriptor_1 *ext = &jack_out_ext_desc[n]; 839 struct usb_midi_in_jack_descriptor *in_emb = &jack_in_emb_desc[n];
853 int m; 840 struct usb_midi_out_jack_descriptor_1 *out_ext = &jack_out_ext_desc[n];
854 841
855 ext->bLength = USB_DT_MIDI_OUT_SIZE(1); 842 in_emb->bLength = USB_DT_MIDI_IN_SIZE;
856 ext->bDescriptorType = USB_DT_CS_INTERFACE; 843 in_emb->bDescriptorType = USB_DT_CS_INTERFACE;
857 ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK; 844 in_emb->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
858 ext->bJackType = USB_MS_EXTERNAL; 845 in_emb->bJackType = USB_MS_EMBEDDED;
859 ext->bJackID = jack++; 846 in_emb->bJackID = jack++;
860 ext->bNrInputPins = 1; 847 in_emb->iJack = 0;
861 ext->iJack = 0; 848 midi_function[i++] = (struct usb_descriptor_header *) in_emb;
862 /* ... which all reference the same embedded IN jack */ 849
863 for (m = 0; m < midi->out_ports; m++) { 850 out_ext->bLength = USB_DT_MIDI_OUT_SIZE(1);
864 ext->pins[m].baSourceID = jack_in_emb_desc.bJackID; 851 out_ext->bDescriptorType = USB_DT_CS_INTERFACE;
865 ext->pins[m].baSourcePin = 1; 852 out_ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
866 } 853 out_ext->bJackType = USB_MS_EXTERNAL;
867 854 out_ext->bJackID = jack++;
868 midi_function[i++] = (struct usb_descriptor_header *) ext; 855 out_ext->bNrInputPins = 1;
856 out_ext->iJack = 0;
857 out_ext->pins[0].baSourceID = in_emb->bJackID;
858 out_ext->pins[0].baSourcePin = 1;
859 midi_function[i++] = (struct usb_descriptor_header *) out_ext;
860
861 /* link it to the endpoint */
862 ms_out_desc.baAssocJackID[n] = in_emb->bJackID;
869 } 863 }
870 864
871 /* configure the endpoint descriptors ... */ 865 /* configure the endpoint descriptors ... */
872 ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports); 866 ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
873 ms_out_desc.bNumEmbMIDIJack = midi->in_ports; 867 ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
874 for (n = 0; n < midi->in_ports; n++)
875 ms_out_desc.baAssocJackID[n] = jack_in_emb_desc.bJackID;
876 868
877 ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports); 869 ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
878 ms_in_desc.bNumEmbMIDIJack = midi->out_ports; 870 ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
879 for (n = 0; n < midi->out_ports; n++)
880 ms_in_desc.baAssocJackID[n] = jack_out_emb_desc.bJackID;
881 871
882 /* ... and add them to the list */ 872 /* ... and add them to the list */
883 midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc; 873 midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc;
@@ -901,6 +891,8 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
901 f->descriptors = usb_copy_descriptors(midi_function); 891 f->descriptors = usb_copy_descriptors(midi_function);
902 } 892 }
903 893
894 kfree(midi_function);
895
904 return 0; 896 return 0;
905 897
906fail: 898fail:
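In f_midi the jack topology becomes per-port: each input port gets an external IN jack wired to its own embedded OUT jack, each output port gets an embedded IN jack wired to an external OUT jack, and the descriptor pointer list is heap-allocated because its length depends on the configured port counts. A rough sketch of sizing such a list; the "+ 9" constant for the fixed descriptors and the trailing NULL slot are assumptions mirroring the patch, not exact values.

#include <linux/slab.h>
#include <linux/usb/ch9.h>

/* Sketch: a descriptor list whose length depends on runtime port counts
 * is easier to allocate on the heap than to size as a worst-case array.
 * Two jack descriptors per IN port, two per OUT port, plus the fixed
 * descriptors and a NULL terminator for usb_copy_descriptors(). */
static struct usb_descriptor_header **demo_alloc_desc_list(unsigned int in_ports,
							    unsigned int out_ports)
{
	size_t n = 2 * in_ports + 2 * out_ports + 9 + 1;

	return kcalloc(n, sizeof(struct usb_descriptor_header *),
		       GFP_KERNEL);
}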
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 349077033338..16a509ae517b 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -346,7 +346,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
346 } 346 }
347 347
348 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 348 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
349 skb->len == 0, req->actual); 349 skb->len <= 1, req->actual);
350 page = NULL; 350 page = NULL;
351 351
352 if (req->actual < req->length) { /* Last fragment */ 352 if (req->actual < req->length) { /* Last fragment */
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 91fdf790ed20..cf33a8d0fd5d 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -131,8 +131,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
131 } 131 }
132 if (!gser->port.in->desc || !gser->port.out->desc) { 132 if (!gser->port.in->desc || !gser->port.out->desc) {
133 DBG(cdev, "activate generic ttyGS%d\n", gser->port_num); 133 DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
134 if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) || 134 if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
135 !config_ep_by_speed(cdev->gadget, f, gser->port.out)) { 135 config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
136 gser->port.in->desc = NULL; 136 gser->port.in->desc = NULL;
137 gser->port.out->desc = NULL; 137 gser->port.out->desc = NULL;
138 return -EINVAL; 138 return -EINVAL;
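The f_serial fix corrects an inverted error check: config_ep_by_speed() follows the usual 0-on-success / negative-errno convention, so negating its return value treated success as failure. A tiny sketch of the convention with stand-in functions; the demo_* names are not from the driver.

#include <linux/errno.h>

/* Sketch: 0-on-success functions are tested directly, not with "!";
 * "!config_ep_by_speed(...)" would have treated success as an error. */
static int demo_enable_ep(int should_fail)
{
	return should_fail ? -EINVAL : 0;
}

static int demo_set_alt(void)
{
	if (demo_enable_ep(0) || demo_enable_ep(0))
		return -EINVAL;		/* any non-zero return is a failure */
	return 0;
}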
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index f7e39b0365ce..11b5196284ae 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -859,7 +859,7 @@ static int class_setup_req(struct fsg_dev *fsg,
859 if (ctrl->bRequestType != (USB_DIR_OUT | 859 if (ctrl->bRequestType != (USB_DIR_OUT |
860 USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 860 USB_TYPE_CLASS | USB_RECIP_INTERFACE))
861 break; 861 break;
862 if (w_index != 0 || w_value != 0) { 862 if (w_index != 0 || w_value != 0 || w_length != 0) {
863 value = -EDOM; 863 value = -EDOM;
864 break; 864 break;
865 } 865 }
@@ -875,7 +875,7 @@ static int class_setup_req(struct fsg_dev *fsg,
875 if (ctrl->bRequestType != (USB_DIR_IN | 875 if (ctrl->bRequestType != (USB_DIR_IN |
876 USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 876 USB_TYPE_CLASS | USB_RECIP_INTERFACE))
877 break; 877 break;
878 if (w_index != 0 || w_value != 0) { 878 if (w_index != 0 || w_value != 0 || w_length != 1) {
879 value = -EDOM; 879 value = -EDOM;
880 break; 880 break;
881 } 881 }
diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index 43a49ecc1f36..dcbc0a2e48dd 100644
--- a/drivers/usb/gadget/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/fsl_devices.h> 17#include <linux/fsl_devices.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/io.h>
19 20
20#include <mach/hardware.h> 21#include <mach/hardware.h>
21 22
@@ -88,7 +89,6 @@ eenahb:
88void fsl_udc_clk_finalize(struct platform_device *pdev) 89void fsl_udc_clk_finalize(struct platform_device *pdev)
89{ 90{
90 struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; 91 struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
91#if defined(CONFIG_SOC_IMX35)
92 if (cpu_is_mx35()) { 92 if (cpu_is_mx35()) {
93 unsigned int v; 93 unsigned int v;
94 94
@@ -101,7 +101,6 @@ void fsl_udc_clk_finalize(struct platform_device *pdev)
101 USBPHYCTRL_OTGBASE_OFFSET)); 101 USBPHYCTRL_OTGBASE_OFFSET));
102 } 102 }
103 } 103 }
104#endif
105 104
106 /* ULPI transceivers don't need usbpll */ 105 /* ULPI transceivers don't need usbpll */
107 if (pdata->phy_mode == FSL_USB2_PHY_ULPI) { 106 if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 2a03e4de11c1..e00cf92409ce 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2336,8 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
2336 if (!udc_controller) 2336 if (!udc_controller)
2337 return -ENODEV; 2337 return -ENODEV;
2338 2338
2339 if (!driver || (driver->speed != USB_SPEED_FULL 2339 if (!driver || driver->speed < USB_SPEED_FULL
2340 && driver->speed != USB_SPEED_HIGH)
2341 || !bind || !driver->disconnect || !driver->setup) 2340 || !bind || !driver->disconnect || !driver->setup)
2342 return -EINVAL; 2341 return -EINVAL;
2343 2342
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index b2c44e1d5813..dd28ef3def71 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -696,12 +696,31 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
696 kfree(req); 696 kfree(req);
697} 697}
698 698
699/*-------------------------------------------------------------------------*/ 699/* Actually add a dTD chain to an empty dQH and let go */
700static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
701{
702 struct ep_queue_head *qh = get_qh_by_ep(ep);
703
704 /* Write dQH next pointer and terminate bit to 0 */
705 qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
706 & EP_QUEUE_HEAD_NEXT_POINTER_MASK);
707
708 /* Clear active and halt bit */
709 qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
710 | EP_QUEUE_HEAD_STATUS_HALT));
711
712 /* Ensure that updates to the QH will occur before priming. */
713 wmb();
714
715 /* Prime endpoint by writing correct bit to ENDPTPRIME */
716 fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
717 : (1 << (ep_index(ep))), &dr_regs->endpointprime);
718}
719
720/* Add dTD chain to the dQH of an EP */
700static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req) 721static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
701{ 722{
702 int i = ep_index(ep) * 2 + ep_is_in(ep);
703 u32 temp, bitmask, tmp_stat; 723 u32 temp, bitmask, tmp_stat;
704 struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
705 724
706 /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr); 725 /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
707 VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */ 726 VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
@@ -719,7 +738,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
719 cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK); 738 cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
720 /* Read prime bit, if 1 goto done */ 739 /* Read prime bit, if 1 goto done */
721 if (fsl_readl(&dr_regs->endpointprime) & bitmask) 740 if (fsl_readl(&dr_regs->endpointprime) & bitmask)
722 goto out; 741 return;
723 742
724 do { 743 do {
725 /* Set ATDTW bit in USBCMD */ 744 /* Set ATDTW bit in USBCMD */
@@ -736,28 +755,10 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
736 fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd); 755 fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
737 756
738 if (tmp_stat) 757 if (tmp_stat)
739 goto out; 758 return;
740 } 759 }
741 760
742 /* Write dQH next pointer and terminate bit to 0 */ 761 fsl_prime_ep(ep, req->head);
743 temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
744 dQH->next_dtd_ptr = cpu_to_hc32(temp);
745
746 /* Clear active and halt bit */
747 temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
748 | EP_QUEUE_HEAD_STATUS_HALT));
749 dQH->size_ioc_int_sts &= temp;
750
751 /* Ensure that updates to the QH will occur before priming. */
752 wmb();
753
754 /* Prime endpoint by writing 1 to ENDPTPRIME */
755 temp = ep_is_in(ep)
756 ? (1 << (ep_index(ep) + 16))
757 : (1 << (ep_index(ep)));
758 fsl_writel(temp, &dr_regs->endpointprime);
759out:
760 return;
761} 762}
762 763
763/* Fill in the dTD structure 764/* Fill in the dTD structure
@@ -877,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
877 VDBG("%s, bad ep", __func__); 878 VDBG("%s, bad ep", __func__);
878 return -EINVAL; 879 return -EINVAL;
879 } 880 }
880 if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { 881 if (usb_endpoint_xfer_isoc(ep->desc)) {
881 if (req->req.length > ep->ep.maxpacket) 882 if (req->req.length > ep->ep.maxpacket)
882 return -EMSGSIZE; 883 return -EMSGSIZE;
883 } 884 }
@@ -973,25 +974,20 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
973 974
974 /* The request isn't the last request in this ep queue */ 975 /* The request isn't the last request in this ep queue */
975 if (req->queue.next != &ep->queue) { 976 if (req->queue.next != &ep->queue) {
976 struct ep_queue_head *qh;
977 struct fsl_req *next_req; 977 struct fsl_req *next_req;
978 978
979 qh = ep->qh;
980 next_req = list_entry(req->queue.next, struct fsl_req, 979 next_req = list_entry(req->queue.next, struct fsl_req,
981 queue); 980 queue);
982 981
983 /* Point the QH to the first TD of next request */ 982 /* prime with dTD of next request */
984 fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr); 983 fsl_prime_ep(ep, next_req->head);
985 } 984 }
986 985 /* The request hasn't been processed, patch up the TD chain */
987 /* The request hasn't been processed, patch up the TD chain */
988 } else { 986 } else {
989 struct fsl_req *prev_req; 987 struct fsl_req *prev_req;
990 988
991 prev_req = list_entry(req->queue.prev, struct fsl_req, queue); 989 prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
992 fsl_writel(fsl_readl(&req->tail->next_td_ptr), 990 prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
993 &prev_req->tail->next_td_ptr);
994
995 } 991 }
996 992
997 done(ep, req, -ECONNRESET); 993 done(ep, req, -ECONNRESET);
@@ -1032,7 +1028,7 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
1032 goto out; 1028 goto out;
1033 } 1029 }
1034 1030
1035 if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { 1031 if (usb_endpoint_xfer_isoc(ep->desc)) {
1036 status = -EOPNOTSUPP; 1032 status = -EOPNOTSUPP;
1037 goto out; 1033 goto out;
1038 } 1034 }
@@ -1068,7 +1064,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
1068 struct fsl_udc *udc; 1064 struct fsl_udc *udc;
1069 int size = 0; 1065 int size = 0;
1070 u32 bitmask; 1066 u32 bitmask;
1071 struct ep_queue_head *d_qh; 1067 struct ep_queue_head *qh;
1072 1068
1073 ep = container_of(_ep, struct fsl_ep, ep); 1069 ep = container_of(_ep, struct fsl_ep, ep);
1074 if (!_ep || (!ep->desc && ep_index(ep) != 0)) 1070 if (!_ep || (!ep->desc && ep_index(ep) != 0))
@@ -1079,13 +1075,13 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
1079 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) 1075 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
1080 return -ESHUTDOWN; 1076 return -ESHUTDOWN;
1081 1077
1082 d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)]; 1078 qh = get_qh_by_ep(ep);
1083 1079
1084 bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) : 1080 bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
1085 (1 << (ep_index(ep))); 1081 (1 << (ep_index(ep)));
1086 1082
1087 if (fsl_readl(&dr_regs->endptstatus) & bitmask) 1083 if (fsl_readl(&dr_regs->endptstatus) & bitmask)
1088 size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE) 1084 size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
1089 >> DTD_LENGTH_BIT_POS; 1085 >> DTD_LENGTH_BIT_POS;
1090 1086
1091 pr_debug("%s %u\n", __func__, size); 1087 pr_debug("%s %u\n", __func__, size);
@@ -1717,7 +1713,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
1717 1713
1718static inline enum usb_device_speed portscx_device_speed(u32 reg) 1714static inline enum usb_device_speed portscx_device_speed(u32 reg)
1719{ 1715{
1720 switch (speed & PORTSCX_PORT_SPEED_MASK) { 1716 switch (reg & PORTSCX_PORT_SPEED_MASK) {
1721 case PORTSCX_PORT_SPEED_HIGH: 1717 case PORTSCX_PORT_SPEED_HIGH:
1722 return USB_SPEED_HIGH; 1718 return USB_SPEED_HIGH;
1723 case PORTSCX_PORT_SPEED_FULL: 1719 case PORTSCX_PORT_SPEED_FULL:
@@ -1938,8 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver,
1938 if (!udc_controller) 1934 if (!udc_controller)
1939 return -ENODEV; 1935 return -ENODEV;
1940 1936
1941 if (!driver || (driver->speed != USB_SPEED_FULL 1937 if (!driver || driver->speed < USB_SPEED_FULL
1942 && driver->speed != USB_SPEED_HIGH)
1943 || !bind || !driver->disconnect || !driver->setup) 1938 || !bind || !driver->disconnect || !driver->setup)
1944 return -EINVAL; 1939 return -EINVAL;
1945 1940
@@ -2480,8 +2475,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
2480 2475
2481#ifndef CONFIG_ARCH_MXC 2476#ifndef CONFIG_ARCH_MXC
2482 if (pdata->have_sysif_regs) 2477 if (pdata->have_sysif_regs)
2483 usb_sys_regs = (struct usb_sys_interface *) 2478 usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET;
2484 ((u32)dr_regs + USB_DR_SYS_OFFSET);
2485#endif 2479#endif
2486 2480
2487 /* Initialize USB clocks */ 2481 /* Initialize USB clocks */
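The fsl_udc_core rework factors the endpoint-priming sequence into fsl_prime_ep() (write the dQH next-dTD pointer, clear the active and halt bits, issue a write barrier, set the ENDPTPRIME bit) and reuses it from both the queue and dequeue paths; dequeue now also patches the software dTD chain instead of poking the controller register. A condensed sketch of the ordering requirement; the register argument, the low-bit mask and the function name are placeholders.

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: queue-head memory must be visible to the controller before the
 * prime/doorbell register is written. */
static void demo_prime_endpoint(u32 __iomem *prime_reg, u32 *qh_next_dtd,
				u32 td_dma, unsigned int ep_bit)
{
	/* point the queue head at the first dTD; the low bits carry the
	 * terminate flag and must be clear (mask is a placeholder) */
	*qh_next_dtd = td_dma & ~0x1fU;

	wmb();				/* order the QH update before the prime */

	writel(1U << ep_bit, prime_reg);
}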
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 1d51be83fda8..f781f5dec417 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -569,6 +569,16 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
569 * 2 + ((windex & USB_DIR_IN) ? 1 : 0)) 569 * 2 + ((windex & USB_DIR_IN) ? 1 : 0))
570#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP)) 570#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
571 571
572static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
573{
574 /* we only have one ep0 structure but two queue heads */
575 if (ep_index(ep) != 0)
576 return ep->qh;
577 else
578 return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
579 USB_DIR_IN) ? 1 : 0];
580}
581
572struct platform_device; 582struct platform_device;
573#ifdef CONFIG_ARCH_MXC 583#ifdef CONFIG_ARCH_MXC
574int fsl_udc_clk_init(struct platform_device *pdev); 584int fsl_udc_clk_init(struct platform_device *pdev);
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index a392ec0d2d51..6ccae2707e59 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1730,8 +1730,9 @@ static void
1730gadgetfs_disconnect (struct usb_gadget *gadget) 1730gadgetfs_disconnect (struct usb_gadget *gadget)
1731{ 1731{
1732 struct dev_data *dev = get_gadget_data (gadget); 1732 struct dev_data *dev = get_gadget_data (gadget);
1733 unsigned long flags;
1733 1734
1734 spin_lock (&dev->lock); 1735 spin_lock_irqsave (&dev->lock, flags);
1735 if (dev->state == STATE_DEV_UNCONNECTED) 1736 if (dev->state == STATE_DEV_UNCONNECTED)
1736 goto exit; 1737 goto exit;
1737 dev->state = STATE_DEV_UNCONNECTED; 1738 dev->state = STATE_DEV_UNCONNECTED;
@@ -1740,7 +1741,7 @@ gadgetfs_disconnect (struct usb_gadget *gadget)
1740 next_event (dev, GADGETFS_DISCONNECT); 1741 next_event (dev, GADGETFS_DISCONNECT);
1741 ep0_readable (dev); 1742 ep0_readable (dev);
1742exit: 1743exit:
1743 spin_unlock (&dev->lock); 1744 spin_unlock_irqrestore (&dev->lock, flags);
1744} 1745}
1745 1746
1746static void 1747static void
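The gadgetfs change switches dev->lock to the irqsave variants because gadgetfs_disconnect() can be called from interrupt context by the UDC driver; saving and restoring the interrupt flags avoids re-enabling interrupts under a caller that had them disabled. A minimal sketch of the pattern; the demo_* names are illustrative.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_connected;

/* Sketch: a callback that may run in hard-irq context must not leave
 * interrupts unconditionally enabled afterwards, so it saves and
 * restores the flags around the critical section. */
static void demo_disconnect(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_connected = 0;
	spin_unlock_irqrestore(&demo_lock, flags);
}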
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 91d0af2a24a8..9aa1cbbee45b 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver,
1472 int retval; 1472 int retval;
1473 1473
1474 if (!driver 1474 if (!driver
1475 || driver->speed != USB_SPEED_HIGH 1475 || driver->speed < USB_SPEED_HIGH
1476 || !bind 1476 || !bind
1477 || !driver->setup) 1477 || !driver->setup)
1478 return -EINVAL; 1478 return -EINVAL;
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 7f1bc9a73cda..da2b9d0be3ca 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget,
1881 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE) 1881 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
1882 * "must not be used in normal operation" 1882 * "must not be used in normal operation"
1883 */ 1883 */
1884 if (!driver || driver->speed != USB_SPEED_HIGH 1884 if (!driver || driver->speed < USB_SPEED_HIGH
1885 || !driver->setup) 1885 || !driver->setup)
1886 return -EINVAL; 1886 return -EINVAL;
1887 1887
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 550d6dcdf104..5048a0c07640 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. 2 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
@@ -354,6 +354,7 @@ struct pch_udc_dev {
354#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808 354#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
355#define PCI_VENDOR_ID_ROHM 0x10DB 355#define PCI_VENDOR_ID_ROHM 0x10DB
356#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D 356#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
357#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
357 358
358static const char ep0_string[] = "ep0in"; 359static const char ep0_string[] = "ep0in";
359static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */ 360static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
@@ -2970,6 +2971,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
2970 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, 2971 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2971 .class_mask = 0xffffffff, 2972 .class_mask = 0xffffffff,
2972 }, 2973 },
2974 {
2975 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
2976 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2977 .class_mask = 0xffffffff,
2978 },
2973 { 0 }, 2979 { 0 },
2974}; 2980};
2975 2981
@@ -2999,5 +3005,5 @@ static void __exit pch_udc_pci_exit(void)
2999module_exit(pch_udc_pci_exit); 3005module_exit(pch_udc_pci_exit);
3000 3006
3001MODULE_DESCRIPTION("Intel EG20T USB Device Controller"); 3007MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3002MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>"); 3008MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3003MODULE_LICENSE("GPL"); 3009MODULE_LICENSE("GPL");
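pch_udc gains ML7831 support by defining its device ID and appending one more entry to the PCI ID table; the existing probe path is shared with EG20T/ML7213. A sketch of extending such a table — the 0x10DB/0x8808 values are taken from the patch, the demo_* names are not.

#include <linux/module.h>
#include <linux/pci.h>

#define DEMO_PCI_VENDOR_ID_ROHM		0x10DB	/* value from the patch */
#define DEMO_PCI_DEVICE_ID_ML7831_UDC	0x8808	/* value from the patch */

/* Sketch: a new device is supported by appending one entry to the ID
 * table; the class/class_mask pair restricts the match to the USB
 * device-controller function of the chip. */
static const struct pci_device_id demo_udc_ids[] = {
	{
		PCI_DEVICE(DEMO_PCI_VENDOR_ID_ROHM,
			   DEMO_PCI_DEVICE_ID_ML7831_UDC),
		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask = 0xffffffff,
	},
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, demo_udc_ids);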
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 68a826a1b866..fc719a3f8557 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1718,6 +1718,8 @@ static void r8a66597_fifo_flush(struct usb_ep *_ep)
1718 if (list_empty(&ep->queue) && !ep->busy) { 1718 if (list_empty(&ep->queue) && !ep->busy) {
1719 pipe_stop(ep->r8a66597, ep->pipenum); 1719 pipe_stop(ep->r8a66597, ep->pipenum);
1720 r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr); 1720 r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
1721 r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
1722 r8a66597_write(ep->r8a66597, 0, ep->pipectr);
1721 } 1723 }
1722 spin_unlock_irqrestore(&ep->r8a66597->lock, flags); 1724 spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1723} 1725}
@@ -1742,26 +1744,16 @@ static int r8a66597_start(struct usb_gadget *gadget,
1742 struct usb_gadget_driver *driver) 1744 struct usb_gadget_driver *driver)
1743{ 1745{
1744 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget); 1746 struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1745 int retval;
1746 1747
1747 if (!driver 1748 if (!driver
1748 || driver->speed != USB_SPEED_HIGH 1749 || driver->speed < USB_SPEED_HIGH
1749 || !driver->setup) 1750 || !driver->setup)
1750 return -EINVAL; 1751 return -EINVAL;
1751 if (!r8a66597) 1752 if (!r8a66597)
1752 return -ENODEV; 1753 return -ENODEV;
1753 1754
1754 /* hook up the driver */ 1755 /* hook up the driver */
1755 driver->driver.bus = NULL;
1756 r8a66597->driver = driver; 1756 r8a66597->driver = driver;
1757 r8a66597->gadget.dev.driver = &driver->driver;
1758
1759 retval = device_add(&r8a66597->gadget.dev);
1760 if (retval) {
1761 dev_err(r8a66597_to_dev(r8a66597), "device_add error (%d)\n",
1762 retval);
1763 goto error;
1764 }
1765 1757
1766 init_controller(r8a66597); 1758 init_controller(r8a66597);
1767 r8a66597_bset(r8a66597, VBSE, INTENB0); 1759 r8a66597_bset(r8a66597, VBSE, INTENB0);
@@ -1775,12 +1767,6 @@ static int r8a66597_start(struct usb_gadget *gadget,
1775 } 1767 }
1776 1768
1777 return 0; 1769 return 0;
1778
1779error:
1780 r8a66597->driver = NULL;
1781 r8a66597->gadget.dev.driver = NULL;
1782
1783 return retval;
1784} 1770}
1785 1771
1786static int r8a66597_stop(struct usb_gadget *gadget, 1772static int r8a66597_stop(struct usb_gadget *gadget,
@@ -1794,7 +1780,6 @@ static int r8a66597_stop(struct usb_gadget *gadget,
1794 disable_controller(r8a66597); 1780 disable_controller(r8a66597);
1795 spin_unlock_irqrestore(&r8a66597->lock, flags); 1781 spin_unlock_irqrestore(&r8a66597->lock, flags);
1796 1782
1797 device_del(&r8a66597->gadget.dev);
1798 r8a66597->driver = NULL; 1783 r8a66597->driver = NULL;
1799 return 0; 1784 return 0;
1800} 1785}
@@ -1845,6 +1830,7 @@ static int __exit r8a66597_remove(struct platform_device *pdev)
1845 clk_put(r8a66597->clk); 1830 clk_put(r8a66597->clk);
1846 } 1831 }
1847#endif 1832#endif
1833 device_unregister(&r8a66597->gadget.dev);
1848 kfree(r8a66597); 1834 kfree(r8a66597);
1849 return 0; 1835 return 0;
1850} 1836}
@@ -1924,13 +1910,17 @@ static int __init r8a66597_probe(struct platform_device *pdev)
1924 r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW; 1910 r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
1925 1911
1926 r8a66597->gadget.ops = &r8a66597_gadget_ops; 1912 r8a66597->gadget.ops = &r8a66597_gadget_ops;
1927 device_initialize(&r8a66597->gadget.dev);
1928 dev_set_name(&r8a66597->gadget.dev, "gadget"); 1913 dev_set_name(&r8a66597->gadget.dev, "gadget");
1929 r8a66597->gadget.is_dualspeed = 1; 1914 r8a66597->gadget.is_dualspeed = 1;
1930 r8a66597->gadget.dev.parent = &pdev->dev; 1915 r8a66597->gadget.dev.parent = &pdev->dev;
1931 r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask; 1916 r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
1932 r8a66597->gadget.dev.release = pdev->dev.release; 1917 r8a66597->gadget.dev.release = pdev->dev.release;
1933 r8a66597->gadget.name = udc_name; 1918 r8a66597->gadget.name = udc_name;
1919 ret = device_register(&r8a66597->gadget.dev);
1920 if (ret < 0) {
1921 dev_err(&pdev->dev, "device_register failed\n");
1922 goto clean_up;
1923 }
1934 1924
1935 init_timer(&r8a66597->timer); 1925 init_timer(&r8a66597->timer);
1936 r8a66597->timer.function = r8a66597_timer; 1926 r8a66597->timer.function = r8a66597_timer;
@@ -1945,7 +1935,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
1945 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", 1935 dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
1946 clk_name); 1936 clk_name);
1947 ret = PTR_ERR(r8a66597->clk); 1937 ret = PTR_ERR(r8a66597->clk);
1948 goto clean_up; 1938 goto clean_up_dev;
1949 } 1939 }
1950 clk_enable(r8a66597->clk); 1940 clk_enable(r8a66597->clk);
1951 } 1941 }
@@ -2014,7 +2004,9 @@ clean_up2:
2014 clk_disable(r8a66597->clk); 2004 clk_disable(r8a66597->clk);
2015 clk_put(r8a66597->clk); 2005 clk_put(r8a66597->clk);
2016 } 2006 }
2007clean_up_dev:
2017#endif 2008#endif
2009 device_unregister(&r8a66597->gadget.dev);
2018clean_up: 2010clean_up:
2019 if (r8a66597) { 2011 if (r8a66597) {
2020 if (r8a66597->sudmac_reg) 2012 if (r8a66597->sudmac_reg)
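In r8a66597-udc the gadget device is now registered once during probe and unregistered in remove, rather than in the start/stop callbacks, and the new clean_up_dev label keeps the error path unwinding in reverse order of setup. A generic sketch of that goto-unwinding style; every demo_* helper is a stand-in, not a driver function.

#include <linux/errno.h>

static int  demo_register_device(void)   { return 0; }
static void demo_unregister_device(void) { }
static int  demo_get_clock(void)         { return 0; }
static void demo_put_clock(void)         { }
static int  demo_request_irq(void)       { return 0; }

/* Sketch: each failure jumps to the label that undoes everything set up
 * so far; unwinding runs in reverse order of setup. */
static int demo_probe(void)
{
	int ret;

	ret = demo_register_device();
	if (ret)
		goto err_out;
	ret = demo_get_clock();
	if (ret)
		goto err_unregister;
	ret = demo_request_irq();
	if (ret)
		goto err_put_clock;
	return 0;

err_put_clock:
	demo_put_clock();
err_unregister:
	demo_unregister_device();
err_out:
	return ret;
}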
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index a552453dc946..b31448229f0b 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2586,10 +2586,8 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver,
2586 return -EINVAL; 2586 return -EINVAL;
2587 } 2587 }
2588 2588
2589 if (driver->speed != USB_SPEED_HIGH && 2589 if (driver->speed < USB_SPEED_FULL)
2590 driver->speed != USB_SPEED_FULL) {
2591 dev_err(hsotg->dev, "%s: bad speed\n", __func__); 2590 dev_err(hsotg->dev, "%s: bad speed\n", __func__);
2592 }
2593 2591
2594 if (!bind || !driver->setup) { 2592 if (!bind || !driver->setup) {
2595 dev_err(hsotg->dev, "%s: missing entry points\n", __func__); 2593 dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 8d54f893cefe..20a553b46aed 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -1142,8 +1142,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
1142 int ret; 1142 int ret;
1143 1143
1144 if (!driver 1144 if (!driver
1145 || (driver->speed != USB_SPEED_FULL && 1145 || driver->speed < USB_SPEED_FULL
1146 driver->speed != USB_SPEED_HIGH)
1147 || !bind 1146 || !bind
1148 || !driver->unbind || !driver->disconnect || !driver->setup) 1147 || !driver->unbind || !driver->disconnect || !driver->setup)
1149 return -EINVAL; 1148 return -EINVAL;
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 022baeca7c94..6939e17f4580 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -210,10 +210,10 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
210 kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); 210 kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
211 211
212 if (udc_is_newstyle(udc)) { 212 if (udc_is_newstyle(udc)) {
213 usb_gadget_disconnect(udc->gadget); 213 udc->driver->disconnect(udc->gadget);
214 udc->driver->unbind(udc->gadget); 214 udc->driver->unbind(udc->gadget);
215 usb_gadget_udc_stop(udc->gadget, udc->driver); 215 usb_gadget_udc_stop(udc->gadget, udc->driver);
216 216 usb_gadget_disconnect(udc->gadget);
217 } else { 217 } else {
218 usb_gadget_stop(udc->gadget, udc->driver); 218 usb_gadget_stop(udc->gadget, udc->driver);
219 } 219 }
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
344static ssize_t usb_udc_srp_store(struct device *dev, 344static ssize_t usb_udc_srp_store(struct device *dev,
345 struct device_attribute *attr, const char *buf, size_t n) 345 struct device_attribute *attr, const char *buf, size_t n)
346{ 346{
347 struct usb_udc *udc = dev_get_drvdata(dev); 347 struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
348 348
349 if (sysfs_streq(buf, "1")) 349 if (sysfs_streq(buf, "1"))
350 usb_gadget_wakeup(udc->gadget); 350 usb_gadget_wakeup(udc->gadget);
@@ -378,7 +378,7 @@ static ssize_t usb_udc_speed_show(struct device *dev,
378 return snprintf(buf, PAGE_SIZE, "%s\n", 378 return snprintf(buf, PAGE_SIZE, "%s\n",
379 usb_speed_string(udc->gadget->speed)); 379 usb_speed_string(udc->gadget->speed));
380} 380}
381static DEVICE_ATTR(speed, S_IRUSR, usb_udc_speed_show, NULL); 381static DEVICE_ATTR(speed, S_IRUGO, usb_udc_speed_show, NULL);
382 382
383#define USB_UDC_ATTR(name) \ 383#define USB_UDC_ATTR(name) \
384ssize_t usb_udc_##name##_show(struct device *dev, \ 384ssize_t usb_udc_##name##_show(struct device *dev, \
@@ -389,7 +389,7 @@ ssize_t usb_udc_##name##_show(struct device *dev, \
389 \ 389 \
390 return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \ 390 return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \
391} \ 391} \
392static DEVICE_ATTR(name, S_IRUSR, usb_udc_##name##_show, NULL) 392static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL)
393 393
394static USB_UDC_ATTR(is_dualspeed); 394static USB_UDC_ATTR(is_dualspeed);
395static USB_UDC_ATTR(is_otg); 395static USB_UDC_ATTR(is_otg);
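The udc-core changes reorder teardown for new-style UDCs (notify the gadget driver via its disconnect callback, unbind it, stop the controller, then drop the pullup with usb_gadget_disconnect()), fetch the struct usb_udc in the srp store via container_of() because the device is embedded in it, and widen the sysfs attributes from owner-only to world-readable (S_IRUGO). A sketch of the container_of() point with an invented demo_udc type.

#include <linux/device.h>
#include <linux/kernel.h>

/* Sketch: when struct device is embedded in a larger object, the owner
 * is recovered with container_of(); dev_get_drvdata() only works if
 * drvdata was explicitly stored, which is not relied on here. */
struct demo_udc {
	int		vbus;
	struct device	dev;		/* embedded, registered device */
};

static inline struct demo_udc *dev_to_demo_udc(struct device *dev)
{
	return container_of(dev, struct demo_udc, dev);
}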
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 2e829fae6482..a60679cbbf85 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1475,30 +1475,36 @@ iso_stream_schedule (
1475 * jump until after the queue is primed. 1475 * jump until after the queue is primed.
1476 */ 1476 */
1477 else { 1477 else {
1478 int done = 0;
1478 start = SCHEDULE_SLOP + (now & ~0x07); 1479 start = SCHEDULE_SLOP + (now & ~0x07);
1479 1480
1480 /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ 1481 /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
1481 1482
1482 /* find a uframe slot with enough bandwidth */ 1483 /* find a uframe slot with enough bandwidth.
1483 next = start + period; 1484 * Early uframes are more precious because full-speed
1484 for (; start < next; start++) { 1485 * iso IN transfers can't use late uframes,
1485 1486 * and therefore they should be allocated last.
1487 */
1488 next = start;
1489 start += period;
1490 do {
1491 start--;
1486 /* check schedule: enough space? */ 1492 /* check schedule: enough space? */
1487 if (stream->highspeed) { 1493 if (stream->highspeed) {
1488 if (itd_slot_ok(ehci, mod, start, 1494 if (itd_slot_ok(ehci, mod, start,
1489 stream->usecs, period)) 1495 stream->usecs, period))
1490 break; 1496 done = 1;
1491 } else { 1497 } else {
1492 if ((start % 8) >= 6) 1498 if ((start % 8) >= 6)
1493 continue; 1499 continue;
1494 if (sitd_slot_ok(ehci, mod, stream, 1500 if (sitd_slot_ok(ehci, mod, stream,
1495 start, sched, period)) 1501 start, sched, period))
1496 break; 1502 done = 1;
1497 } 1503 }
1498 } 1504 } while (start > next && !done);
1499 1505
1500 /* no room in the schedule */ 1506 /* no room in the schedule */
1501 if (start == next) { 1507 if (!done) {
1502 ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n", 1508 ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
1503 urb, now, now + mod); 1509 urb, now, now + mod);
1504 status = -ENOSPC; 1510 status = -ENOSPC;
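The ehci-sched change walks the candidate uframes from the end of the window backwards and records success in a done flag, so full-speed isochronous IN transfers, which cannot use late uframes, are not crowded out of the early ones; failure is then detected by !done rather than start == next. A reduced sketch of that backwards search; slot_fits() stands in for the real bandwidth check.

#include <linux/types.h>

/* Sketch: search candidate slots from the back of a window so the
 * scarce early slots are only taken when nothing later fits. */
static int demo_find_slot(unsigned int window_start, unsigned int period,
			  bool (*slot_fits)(unsigned int))
{
	unsigned int next = window_start;
	unsigned int slot = window_start + period;
	int done = 0;

	do {
		slot--;				/* try later slots first */
		if (slot_fits(slot))
			done = 1;
	} while (slot > next && !done);

	return done ? (int)slot : -1;		/* -1: window is full */
}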
diff --git a/drivers/usb/host/ehci-xls.c b/drivers/usb/host/ehci-xls.c
index fe74bd676018..b4fb511d24bc 100644
--- a/drivers/usb/host/ehci-xls.c
+++ b/drivers/usb/host/ehci-xls.c
@@ -19,7 +19,7 @@ static int ehci_xls_setup(struct usb_hcd *hcd)
19 19
20 ehci->caps = hcd->regs; 20 ehci->caps = hcd->regs;
21 ehci->regs = hcd->regs + 21 ehci->regs = hcd->regs +
22 HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); 22 HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
23 dbg_hcs_params(ehci, "reset"); 23 dbg_hcs_params(ehci, "reset");
24 dbg_hcc_params(ehci, "reset"); 24 dbg_hcc_params(ehci, "reset");
25 25
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index ba3a46b78b75..95a9fec38e89 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -223,6 +223,9 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
223 if (port < 0 || port >= 2) 223 if (port < 0 || port >= 2)
224 return; 224 return;
225 225
226 if (pdata->vbus_pin[port] <= 0)
227 return;
228
226 gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable); 229 gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable);
227} 230}
228 231
@@ -231,6 +234,9 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
231 if (port < 0 || port >= 2) 234 if (port < 0 || port >= 2)
232 return -EINVAL; 235 return -EINVAL;
233 236
237 if (pdata->vbus_pin[port] <= 0)
238 return -EINVAL;
239
234 return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted; 240 return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted;
235} 241}
236 242
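ohci-at91 now skips VBUS power switching on ports whose platform data carries no usable GPIO; the patch treats pin values <= 0 as "not wired". A small sketch of that guard; demo_set_port_power() is illustrative, not a driver function.

#include <linux/gpio.h>

/* Sketch: ports without a wired VBUS GPIO are skipped; values <= 0 mean
 * "no pin configured", mirroring the patch. */
static void demo_set_port_power(int vbus_gpio, int inverted, int enable)
{
	if (vbus_gpio <= 0)
		return;			/* no VBUS control on this port */

	gpio_set_value(vbus_gpio, !inverted ^ enable);
}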
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 34efd479e068..b2639191549e 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -389,17 +389,14 @@ ohci_shutdown (struct usb_hcd *hcd)
389 struct ohci_hcd *ohci; 389 struct ohci_hcd *ohci;
390 390
391 ohci = hcd_to_ohci (hcd); 391 ohci = hcd_to_ohci (hcd);
392 ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); 392 ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);
393 ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
394 393
395 /* If the SHUTDOWN quirk is set, don't put the controller in RESET */ 394 /* Software reset, after which the controller goes into SUSPEND */
396 ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ? 395 ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
397 OHCI_CTRL_RWC | OHCI_CTRL_HCFS : 396 ohci_readl(ohci, &ohci->regs->cmdstatus); /* flush the writes */
398 OHCI_CTRL_RWC); 397 udelay(10);
399 ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
400 398
401 /* flush the writes */ 399 ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
402 (void) ohci_readl (ohci, &ohci->regs->control);
403} 400}
404 401
405static int check_ed(struct ohci_hcd *ohci, struct ed *ed) 402static int check_ed(struct ohci_hcd *ohci, struct ed *ed)
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index ad8166c681e2..bc01b064585a 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -175,28 +175,6 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
175 return 0; 175 return 0;
176} 176}
177 177
178/* nVidia controllers continue to drive Reset signalling on the bus
179 * even after system shutdown, wasting power. This flag tells the
180 * shutdown routine to leave the controller OPERATIONAL instead of RESET.
181 */
182static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
183{
184 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
185 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
186
187 /* Evidently nVidia fixed their later hardware; this is a guess at
188 * the changeover point.
189 */
190#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d
191
192 if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) {
193 ohci->flags |= OHCI_QUIRK_SHUTDOWN;
194 ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
195 }
196
197 return 0;
198}
199
200static void sb800_prefetch(struct ohci_hcd *ohci, int on) 178static void sb800_prefetch(struct ohci_hcd *ohci, int on)
201{ 179{
202 struct pci_dev *pdev; 180 struct pci_dev *pdev;
@@ -260,10 +238,6 @@ static const struct pci_device_id ohci_pci_quirks[] = {
260 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), 238 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
261 .driver_data = (unsigned long)ohci_quirk_amd700, 239 .driver_data = (unsigned long)ohci_quirk_amd700,
262 }, 240 },
263 {
264 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
265 .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown,
266 },
267 241
268 /* FIXME for some of the early AMD 760 southbridges, OHCI 242 /* FIXME for some of the early AMD 760 southbridges, OHCI
269 * won't work at all. blacklist them. 243 * won't work at all. blacklist them.
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 35e5fd640ce7..0795b934d00c 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -403,7 +403,6 @@ struct ohci_hcd {
403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ 403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
404#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/ 404#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
405#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ 405#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
406#define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */
407 // there are also chip quirks/bugs in init logic 406 // there are also chip quirks/bugs in init logic
408 407
409 struct work_struct nec_work; /* Worker for NEC quirk */ 408 struct work_struct nec_work; /* Worker for NEC quirk */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 27a3dec32fa2..caf87428ca43 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -37,6 +37,7 @@
37#define OHCI_INTRENABLE 0x10 37#define OHCI_INTRENABLE 0x10
38#define OHCI_INTRDISABLE 0x14 38#define OHCI_INTRDISABLE 0x14
39#define OHCI_FMINTERVAL 0x34 39#define OHCI_FMINTERVAL 0x34
40#define OHCI_HCFS (3 << 6) /* hc functional state */
40#define OHCI_HCR (1 << 0) /* host controller reset */ 41#define OHCI_HCR (1 << 0) /* host controller reset */
41#define OHCI_OCR (1 << 3) /* ownership change request */ 42#define OHCI_OCR (1 << 3) /* ownership change request */
42#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */ 43#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
@@ -466,6 +467,8 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
466{ 467{
467 void __iomem *base; 468 void __iomem *base;
468 u32 control; 469 u32 control;
470 u32 fminterval;
471 int cnt;
469 472
470 if (!mmio_resource_enabled(pdev, 0)) 473 if (!mmio_resource_enabled(pdev, 0))
471 return; 474 return;
@@ -498,41 +501,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
498 } 501 }
499#endif 502#endif
500 503
501 /* reset controller, preserving RWC (and possibly IR) */ 504 /* disable interrupts */
502 writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL); 505 writel((u32) ~0, base + OHCI_INTRDISABLE);
503 readl(base + OHCI_CONTROL);
504 506
505 /* Some NVIDIA controllers stop working if kept in RESET for too long */ 507 /* Reset the USB bus, if the controller isn't already in RESET */
506 if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) { 508 if (control & OHCI_HCFS) {
507 u32 fminterval; 509 /* Go into RESET, preserving RWC (and possibly IR) */
508 int cnt; 510 writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
511 readl(base + OHCI_CONTROL);
509 512
510 /* drive reset for at least 50 ms (7.1.7.5) */ 513 /* drive bus reset for at least 50 ms (7.1.7.5) */
511 msleep(50); 514 msleep(50);
515 }
512 516
513 /* software reset of the controller, preserving HcFmInterval */ 517 /* software reset of the controller, preserving HcFmInterval */
514 fminterval = readl(base + OHCI_FMINTERVAL); 518 fminterval = readl(base + OHCI_FMINTERVAL);
515 writel(OHCI_HCR, base + OHCI_CMDSTATUS); 519 writel(OHCI_HCR, base + OHCI_CMDSTATUS);
516 520
517 /* reset requires max 10 us delay */ 521 /* reset requires max 10 us delay */
518 for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */ 522 for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */
519 if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0) 523 if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
520 break; 524 break;
521 udelay(1); 525 udelay(1);
522 }
523 writel(fminterval, base + OHCI_FMINTERVAL);
524
525 /* Now we're in the SUSPEND state with all devices reset
526 * and wakeups and interrupts disabled
527 */
528 } 526 }
527 writel(fminterval, base + OHCI_FMINTERVAL);
529 528
530 /* 529 /* Now the controller is safely in SUSPEND and nothing can wake it up */
531 * disable interrupts
532 */
533 writel(~(u32)0, base + OHCI_INTRDISABLE);
534 writel(~(u32)0, base + OHCI_INTRSTATUS);
535
536 iounmap(base); 530 iounmap(base);
537} 531}
538 532
@@ -627,7 +621,7 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
627 void __iomem *base, *op_reg_base; 621 void __iomem *base, *op_reg_base;
628 u32 hcc_params, cap, val; 622 u32 hcc_params, cap, val;
629 u8 offset, cap_length; 623 u8 offset, cap_length;
630 int wait_time, delta, count = 256/4; 624 int wait_time, count = 256/4;
631 625
632 if (!mmio_resource_enabled(pdev, 0)) 626 if (!mmio_resource_enabled(pdev, 0))
633 return; 627 return;
@@ -673,11 +667,10 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
673 writel(val, op_reg_base + EHCI_USBCMD); 667 writel(val, op_reg_base + EHCI_USBCMD);
674 668
675 wait_time = 2000; 669 wait_time = 2000;
676 delta = 100;
677 do { 670 do {
678 writel(0x3f, op_reg_base + EHCI_USBSTS); 671 writel(0x3f, op_reg_base + EHCI_USBSTS);
679 udelay(delta); 672 udelay(100);
680 wait_time -= delta; 673 wait_time -= 100;
681 val = readl(op_reg_base + EHCI_USBSTS); 674 val = readl(op_reg_base + EHCI_USBSTS);
682 if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) { 675 if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
683 break; 676 break;
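The pci-quirks rework disables OHCI interrupts first, drives a bus reset only if the controller is not already in the UsbReset functional state (checked via the HcControl HCFS field), and then performs the HcCommandStatus software reset for every controller while preserving HcFmInterval; the EHCI halt loop simply hard-codes the 100 us polling step. A compact sketch of the poll-until-a-self-clearing-bit-drops idiom used for the software reset; DEMO_HCR and the iteration count are assumptions.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_HCR	(1 << 0)	/* self-clearing reset bit (assumption) */

/* Sketch: request a software reset, then poll until the controller
 * clears the bit itself; the spec allows ~10 us, extra loops are slack. */
static int demo_soft_reset(void __iomem *cmdstatus)
{
	int cnt;

	writel(DEMO_HCR, cmdstatus);
	for (cnt = 30; cnt > 0; --cnt) {
		if (!(readl(cmdstatus) & DEMO_HCR))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;			/* controller never came back */
}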
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index d6e175428618..a403b53e86b9 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
124{ 124{
125 qset->td_start = qset->td_end = qset->ntds = 0; 125 qset->td_start = qset->td_end = qset->ntds = 0;
126 126
127 qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T); 127 qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
128 qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK; 128 qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
129 qset->qh.err_count = 0; 129 qset->qh.err_count = 0;
130 qset->qh.scratch[0] = 0; 130 qset->qh.scratch[0] = 0;
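The whci fix matches the byte-order helper to the field width: qh.link is a 64-bit little-endian field, so cpu_to_le32() produced the wrong layout on big-endian hosts. A tiny sketch of keeping helpers and field widths in sync; demo_qh is invented.

#include <asm/byteorder.h>
#include <linux/types.h>

/* Sketch: the byte-order helper must match the field width; a __le64
 * field takes cpu_to_le64(), never cpu_to_le32(). */
struct demo_qh {
	__le64	link;
	__le32	status;
};

static void demo_qh_fill(struct demo_qh *qh, u64 link, u32 status)
{
	qh->link   = cpu_to_le64(link);
	qh->status = cpu_to_le32(status);
}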
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 42a22b8e6922..0e4b25fa3bcd 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -982,7 +982,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
982 struct xhci_virt_device *dev; 982 struct xhci_virt_device *dev;
983 struct xhci_ep_ctx *ep0_ctx; 983 struct xhci_ep_ctx *ep0_ctx;
984 struct xhci_slot_ctx *slot_ctx; 984 struct xhci_slot_ctx *slot_ctx;
985 struct xhci_input_control_ctx *ctrl_ctx;
986 u32 port_num; 985 u32 port_num;
987 struct usb_device *top_dev; 986 struct usb_device *top_dev;
988 987
@@ -994,12 +993,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
994 return -EINVAL; 993 return -EINVAL;
995 } 994 }
996 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); 995 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
997 ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
998 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); 996 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
999 997
1000 /* 2) New slot context and endpoint 0 context are valid*/
1001 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
1002
1003 /* 3) Only the control endpoint is valid - one endpoint context */ 998 /* 3) Only the control endpoint is valid - one endpoint context */
1004 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); 999 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
1005 switch (udev->speed) { 1000 switch (udev->speed) {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 940321b3ec68..9f1d4b15d818 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -816,23 +816,24 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
816 struct xhci_ring *ring; 816 struct xhci_ring *ring;
817 struct xhci_td *cur_td; 817 struct xhci_td *cur_td;
818 int ret, i, j; 818 int ret, i, j;
819 unsigned long flags;
819 820
820 ep = (struct xhci_virt_ep *) arg; 821 ep = (struct xhci_virt_ep *) arg;
821 xhci = ep->xhci; 822 xhci = ep->xhci;
822 823
823 spin_lock(&xhci->lock); 824 spin_lock_irqsave(&xhci->lock, flags);
824 825
825 ep->stop_cmds_pending--; 826 ep->stop_cmds_pending--;
826 if (xhci->xhc_state & XHCI_STATE_DYING) { 827 if (xhci->xhc_state & XHCI_STATE_DYING) {
827 xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " 828 xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
828 "xHCI as DYING, exiting.\n"); 829 "xHCI as DYING, exiting.\n");
829 spin_unlock(&xhci->lock); 830 spin_unlock_irqrestore(&xhci->lock, flags);
830 return; 831 return;
831 } 832 }
832 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { 833 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
833 xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " 834 xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
834 "exiting.\n"); 835 "exiting.\n");
835 spin_unlock(&xhci->lock); 836 spin_unlock_irqrestore(&xhci->lock, flags);
836 return; 837 return;
837 } 838 }
838 839
@@ -844,11 +845,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
844 xhci->xhc_state |= XHCI_STATE_DYING; 845 xhci->xhc_state |= XHCI_STATE_DYING;
845 /* Disable interrupts from the host controller and start halting it */ 846 /* Disable interrupts from the host controller and start halting it */
846 xhci_quiesce(xhci); 847 xhci_quiesce(xhci);
847 spin_unlock(&xhci->lock); 848 spin_unlock_irqrestore(&xhci->lock, flags);
848 849
849 ret = xhci_halt(xhci); 850 ret = xhci_halt(xhci);
850 851
851 spin_lock(&xhci->lock); 852 spin_lock_irqsave(&xhci->lock, flags);
852 if (ret < 0) { 853 if (ret < 0) {
853 /* This is bad; the host is not responding to commands and it's 854 /* This is bad; the host is not responding to commands and it's
854 * not allowing itself to be halted. At least interrupts are 855 * not allowing itself to be halted. At least interrupts are
@@ -896,7 +897,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
896 } 897 }
897 } 898 }
898 } 899 }
899 spin_unlock(&xhci->lock); 900 spin_unlock_irqrestore(&xhci->lock, flags);
900 xhci_dbg(xhci, "Calling usb_hc_died()\n"); 901 xhci_dbg(xhci, "Calling usb_hc_died()\n");
901 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); 902 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
902 xhci_dbg(xhci, "xHCI host controller is dead.\n"); 903 xhci_dbg(xhci, "xHCI host controller is dead.\n");
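
The watchdog above now takes the xHCI lock with spin_lock_irqsave()/spin_unlock_irqrestore(), so the timer callback cannot deadlock against the interrupt handler that grabs the same lock. The pattern, reduced to a sketch with hypothetical names:

#include <linux/spinlock.h>

static void watchdog_cb_sketch(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* disables local IRQs too */
	/* ... inspect/modify state shared with the hard-IRQ handler ... */
	spin_unlock_irqrestore(lock, flags);	/* restores previous IRQ state */
}
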
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1ff95a0df576..a1afb7c39f7e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -711,7 +711,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
711 ring = xhci->cmd_ring; 711 ring = xhci->cmd_ring;
712 seg = ring->deq_seg; 712 seg = ring->deq_seg;
713 do { 713 do {
714 memset(seg->trbs, 0, SEGMENT_SIZE); 714 memset(seg->trbs, 0,
715 sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
716 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
717 cpu_to_le32(~TRB_CYCLE);
715 seg = seg->next; 718 seg = seg->next;
716 } while (seg != ring->deq_seg); 719 } while (seg != ring->deq_seg);
717 720
@@ -799,7 +802,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
799 u32 command, temp = 0; 802 u32 command, temp = 0;
800 struct usb_hcd *hcd = xhci_to_hcd(xhci); 803 struct usb_hcd *hcd = xhci_to_hcd(xhci);
801 struct usb_hcd *secondary_hcd; 804 struct usb_hcd *secondary_hcd;
802 int retval; 805 int retval = 0;
803 806
804 /* Wait a bit if either of the roothubs need to settle from the 807 /* Wait a bit if either of the roothubs need to settle from the
805 * transition into bus suspend. 808 * transition into bus suspend.
@@ -809,6 +812,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
809 xhci->bus_state[1].next_statechange)) 812 xhci->bus_state[1].next_statechange))
810 msleep(100); 813 msleep(100);
811 814
815 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
816 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
817
812 spin_lock_irq(&xhci->lock); 818 spin_lock_irq(&xhci->lock);
813 if (xhci->quirks & XHCI_RESET_ON_RESUME) 819 if (xhci->quirks & XHCI_RESET_ON_RESUME)
814 hibernated = true; 820 hibernated = true;
@@ -878,20 +884,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
878 return retval; 884 return retval;
879 xhci_dbg(xhci, "Start the primary HCD\n"); 885 xhci_dbg(xhci, "Start the primary HCD\n");
880 retval = xhci_run(hcd->primary_hcd); 886 retval = xhci_run(hcd->primary_hcd);
881 if (retval)
882 goto failed_restart;
883
884 xhci_dbg(xhci, "Start the secondary HCD\n");
885 retval = xhci_run(secondary_hcd);
886 if (!retval) { 887 if (!retval) {
887 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 888 xhci_dbg(xhci, "Start the secondary HCD\n");
888 set_bit(HCD_FLAG_HW_ACCESSIBLE, 889 retval = xhci_run(secondary_hcd);
889 &xhci->shared_hcd->flags);
890 } 890 }
891failed_restart:
892 hcd->state = HC_STATE_SUSPENDED; 891 hcd->state = HC_STATE_SUSPENDED;
893 xhci->shared_hcd->state = HC_STATE_SUSPENDED; 892 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
894 return retval; 893 goto done;
895 } 894 }
896 895
897 /* step 4: set Run/Stop bit */ 896 /* step 4: set Run/Stop bit */
@@ -910,11 +909,14 @@ failed_restart:
910 * Running endpoints by ringing their doorbells 909 * Running endpoints by ringing their doorbells
911 */ 910 */
912 911
913 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
914 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
915
916 spin_unlock_irq(&xhci->lock); 912 spin_unlock_irq(&xhci->lock);
917 return 0; 913
914 done:
915 if (retval == 0) {
916 usb_hcd_resume_root_hub(hcd);
917 usb_hcd_resume_root_hub(xhci->shared_hcd);
918 }
919 return retval;
918} 920}
919#endif /* CONFIG_PM */ 921#endif /* CONFIG_PM */
920 922
@@ -3504,6 +3506,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3504 /* Otherwise, update the control endpoint ring enqueue pointer. */ 3506 /* Otherwise, update the control endpoint ring enqueue pointer. */
3505 else 3507 else
3506 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 3508 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3509 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3510 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3511 ctrl_ctx->drop_flags = 0;
3512
3507 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 3513 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3508 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 3514 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3509 3515
@@ -3585,7 +3591,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3585 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) 3591 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
3586 + 1; 3592 + 1;
3587 /* Zero the input context control for later use */ 3593 /* Zero the input context control for later use */
3588 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3589 ctrl_ctx->add_flags = 0; 3594 ctrl_ctx->add_flags = 0;
3590 ctrl_ctx->drop_flags = 0; 3595 ctrl_ctx->drop_flags = 0;
3591 3596
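
The xhci_clear_command_ring() change above zeroes every TRB in a segment except the trailing link TRB, and then only clears that TRB's cycle bit so the segment chain stays intact. A sketch of the idea, using a hypothetical segment layout rather than the driver's real union xhci_trb:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

#define TRBS_PER_SEG	256		/* hypothetical segment size */
struct trb_sketch { __le32 field[4]; };

static void clear_segment(struct trb_sketch *trbs)
{
	/* wipe everything except the last entry, which links to the next segment */
	memset(trbs, 0, sizeof(*trbs) * (TRBS_PER_SEG - 1));
	/* keep the link target, only flip the cycle bit (bit 0 of the control word) */
	trbs[TRBS_PER_SEG - 1].field[3] &= cpu_to_le32(~(u32)0x1);
}
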
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index fc34b8b11910..07a03460a598 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -11,6 +11,7 @@ config USB_MUSB_HDRC
11 select TWL4030_USB if MACH_OMAP_3430SDP 11 select TWL4030_USB if MACH_OMAP_3430SDP
12 select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA 12 select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
13 select USB_OTG_UTILS 13 select USB_OTG_UTILS
14 select USB_GADGET_DUALSPEED
14 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' 15 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
15 help 16 help
16 Say Y here if your system has a dual role high speed USB 17 Say Y here if your system has a dual role high speed USB
@@ -60,7 +61,7 @@ config USB_MUSB_BLACKFIN
60 61
61config USB_MUSB_UX500 62config USB_MUSB_UX500
62 tristate "U8500 and U5500" 63 tristate "U8500 and U5500"
63 depends on (ARCH_U8500 && AB8500_USB) || (ARCH_U5500) 64 depends on (ARCH_U8500 && AB8500_USB)
64 65
65endchoice 66endchoice
66 67
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index 08f1d0b662a3..e233d2b7d335 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -27,6 +27,7 @@
27 */ 27 */
28 28
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/module.h>
30#include <linux/clk.h> 31#include <linux/clk.h>
31#include <linux/io.h> 32#include <linux/io.h>
32#include <linux/platform_device.h> 33#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 4da7492ddbdb..2613bfdb09b6 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -27,6 +27,7 @@
27 */ 27 */
28 28
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/module.h>
30#include <linux/clk.h> 31#include <linux/clk.h>
31#include <linux/io.h> 32#include <linux/io.h>
32#include <linux/platform_device.h> 33#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 20a28731c338..b63ab1570103 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1477,8 +1477,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1477/*-------------------------------------------------------------------------*/ 1477/*-------------------------------------------------------------------------*/
1478 1478
1479#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \ 1479#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \
1480 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \ 1480 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500)
1481 defined(CONFIG_ARCH_U5500)
1482 1481
1483static irqreturn_t generic_interrupt(int irq, void *__hci) 1482static irqreturn_t generic_interrupt(int irq, void *__hci)
1484{ 1483{
@@ -2302,18 +2301,12 @@ static int musb_suspend(struct device *dev)
2302 */ 2301 */
2303 } 2302 }
2304 2303
2305 musb_save_context(musb);
2306
2307 spin_unlock_irqrestore(&musb->lock, flags); 2304 spin_unlock_irqrestore(&musb->lock, flags);
2308 return 0; 2305 return 0;
2309} 2306}
2310 2307
2311static int musb_resume_noirq(struct device *dev) 2308static int musb_resume_noirq(struct device *dev)
2312{ 2309{
2313 struct musb *musb = dev_to_musb(dev);
2314
2315 musb_restore_context(musb);
2316
2317 /* for static cmos like DaVinci, register values were preserved 2310 /* for static cmos like DaVinci, register values were preserved
2318 * unless for some reason the whole soc powered down or the USB 2311 * unless for some reason the whole soc powered down or the USB
2319 * module got reset through the PSC (vs just being disabled). 2312 * module got reset through the PSC (vs just being disabled).
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ae4a20acef6c..922148ff8d29 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1903,7 +1903,7 @@ static int musb_gadget_start(struct usb_gadget *g,
1903 unsigned long flags; 1903 unsigned long flags;
1904 int retval = -EINVAL; 1904 int retval = -EINVAL;
1905 1905
1906 if (driver->speed != USB_SPEED_HIGH) 1906 if (driver->speed < USB_SPEED_HIGH)
1907 goto err0; 1907 goto err0;
1908 1908
1909 pm_runtime_get_sync(musb->controller); 1909 pm_runtime_get_sync(musb->controller);
@@ -1999,10 +1999,6 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1999 nuke(&hw_ep->ep_out, -ESHUTDOWN); 1999 nuke(&hw_ep->ep_out, -ESHUTDOWN);
2000 } 2000 }
2001 } 2001 }
2002
2003 spin_unlock(&musb->lock);
2004 driver->disconnect(&musb->g);
2005 spin_lock(&musb->lock);
2006 } 2002 }
2007} 2003}
2008 2004
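
The musb_gadget_start() change above relies on enum usb_device_speed being ordered, so 'driver->speed < USB_SPEED_HIGH' rejects full/low-speed-only gadget drivers while still accepting drivers rated above high speed. A sketch of the check, assuming the ordering in <linux/usb/ch9.h>:

#include <linux/usb/ch9.h>

static int speed_ok_for_musb(enum usb_device_speed s)
{
	return s >= USB_SPEED_HIGH;	/* HIGH and anything faster */
}
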
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index d2e2efaba658..08c679c0dde5 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -405,7 +405,7 @@ int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
405/* 405/*
406 * platform functions 406 * platform functions
407 */ 407 */
408static int __devinit usbhs_probe(struct platform_device *pdev) 408static int usbhs_probe(struct platform_device *pdev)
409{ 409{
410 struct renesas_usbhs_platform_info *info = pdev->dev.platform_data; 410 struct renesas_usbhs_platform_info *info = pdev->dev.platform_data;
411 struct renesas_usbhs_driver_callback *dfunc; 411 struct renesas_usbhs_driver_callback *dfunc;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 8da685e796d1..ffdf5d15085e 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -820,7 +820,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
820 if (len % 4) /* 32bit alignment */ 820 if (len % 4) /* 32bit alignment */
821 goto usbhsf_pio_prepare_push; 821 goto usbhsf_pio_prepare_push;
822 822
823 if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ 823 if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
824 goto usbhsf_pio_prepare_push; 824 goto usbhsf_pio_prepare_push;
825 825
826 /* get enable DMA fifo */ 826 /* get enable DMA fifo */
@@ -897,7 +897,7 @@ static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
897 if (!fifo) 897 if (!fifo)
898 goto usbhsf_pio_prepare_pop; 898 goto usbhsf_pio_prepare_pop;
899 899
900 if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ 900 if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
901 goto usbhsf_pio_prepare_pop; 901 goto usbhsf_pio_prepare_pop;
902 902
903 ret = usbhsf_fifo_select(pipe, fifo, 0); 903 ret = usbhsf_fifo_select(pipe, fifo, 0);
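
The two fifo.c fixes above correct the 8-byte alignment test: the old expression dereferenced the buffer and added the offset to the loaded value, while the intent is to test the address itself. A sketch of the corrected check, with hypothetical parameter names:

#include <linux/types.h>

static int is_8byte_aligned(const void *buf, size_t actual)
{
	/* old bug: (*(u32 *)buf + actual) & 0x7 tested the first word's value */
	return (((uintptr_t)buf + actual) & 0x7) == 0;
}
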
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 053f86d70009..ad96a3896729 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -349,7 +349,7 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
349 if (mod->irq_attch) 349 if (mod->irq_attch)
350 intenb1 |= ATTCHE; 350 intenb1 |= ATTCHE;
351 351
352 if (mod->irq_attch) 352 if (mod->irq_dtch)
353 intenb1 |= DTCHE; 353 intenb1 |= DTCHE;
354 354
355 if (mod->irq_sign) 355 if (mod->irq_sign)
diff --git a/drivers/usb/renesas_usbhs/mod.h b/drivers/usb/renesas_usbhs/mod.h
index 8ae3733031cd..6c6875533f01 100644
--- a/drivers/usb/renesas_usbhs/mod.h
+++ b/drivers/usb/renesas_usbhs/mod.h
@@ -143,8 +143,8 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod);
143 */ 143 */
144#if defined(CONFIG_USB_RENESAS_USBHS_HCD) || \ 144#if defined(CONFIG_USB_RENESAS_USBHS_HCD) || \
145 defined(CONFIG_USB_RENESAS_USBHS_HCD_MODULE) 145 defined(CONFIG_USB_RENESAS_USBHS_HCD_MODULE)
146extern int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv); 146extern int usbhs_mod_host_probe(struct usbhs_priv *priv);
147extern int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv); 147extern int usbhs_mod_host_remove(struct usbhs_priv *priv);
148#else 148#else
149static inline int usbhs_mod_host_probe(struct usbhs_priv *priv) 149static inline int usbhs_mod_host_probe(struct usbhs_priv *priv)
150{ 150{
@@ -157,8 +157,8 @@ static inline void usbhs_mod_host_remove(struct usbhs_priv *priv)
157 157
158#if defined(CONFIG_USB_RENESAS_USBHS_UDC) || \ 158#if defined(CONFIG_USB_RENESAS_USBHS_UDC) || \
159 defined(CONFIG_USB_RENESAS_USBHS_UDC_MODULE) 159 defined(CONFIG_USB_RENESAS_USBHS_UDC_MODULE)
160extern int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv); 160extern int usbhs_mod_gadget_probe(struct usbhs_priv *priv);
161extern void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv); 161extern void usbhs_mod_gadget_remove(struct usbhs_priv *priv);
162#else 162#else
163static inline int usbhs_mod_gadget_probe(struct usbhs_priv *priv) 163static inline int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
164{ 164{
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 4cc7ee0babc6..7f4e80338570 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -751,53 +751,32 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
751 struct usb_gadget_driver *driver) 751 struct usb_gadget_driver *driver)
752{ 752{
753 struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); 753 struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
754 struct usbhs_priv *priv; 754 struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
755 struct device *dev;
756 int ret;
757 755
758 if (!driver || 756 if (!driver ||
759 !driver->setup || 757 !driver->setup ||
760 driver->speed != USB_SPEED_HIGH) 758 driver->speed < USB_SPEED_FULL)
761 return -EINVAL; 759 return -EINVAL;
762 760
763 dev = usbhsg_gpriv_to_dev(gpriv);
764 priv = usbhsg_gpriv_to_priv(gpriv);
765
766 /* first hook up the driver ... */ 761 /* first hook up the driver ... */
767 gpriv->driver = driver; 762 gpriv->driver = driver;
768 gpriv->gadget.dev.driver = &driver->driver; 763 gpriv->gadget.dev.driver = &driver->driver;
769 764
770 ret = device_add(&gpriv->gadget.dev);
771 if (ret) {
772 dev_err(dev, "device_add error %d\n", ret);
773 goto add_fail;
774 }
775
776 return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD); 765 return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
777
778add_fail:
779 gpriv->driver = NULL;
780 gpriv->gadget.dev.driver = NULL;
781
782 return ret;
783} 766}
784 767
785static int usbhsg_gadget_stop(struct usb_gadget *gadget, 768static int usbhsg_gadget_stop(struct usb_gadget *gadget,
786 struct usb_gadget_driver *driver) 769 struct usb_gadget_driver *driver)
787{ 770{
788 struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); 771 struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
789 struct usbhs_priv *priv; 772 struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
790 struct device *dev;
791 773
792 if (!driver || 774 if (!driver ||
793 !driver->unbind) 775 !driver->unbind)
794 return -EINVAL; 776 return -EINVAL;
795 777
796 dev = usbhsg_gpriv_to_dev(gpriv);
797 priv = usbhsg_gpriv_to_priv(gpriv);
798
799 usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD); 778 usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
800 device_del(&gpriv->gadget.dev); 779 gpriv->gadget.dev.driver = NULL;
801 gpriv->driver = NULL; 780 gpriv->driver = NULL;
802 781
803 return 0; 782 return 0;
@@ -827,10 +806,17 @@ static int usbhsg_start(struct usbhs_priv *priv)
827 806
828static int usbhsg_stop(struct usbhs_priv *priv) 807static int usbhsg_stop(struct usbhs_priv *priv)
829{ 808{
809 struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
810
811 /* cable disconnect */
812 if (gpriv->driver &&
813 gpriv->driver->disconnect)
814 gpriv->driver->disconnect(&gpriv->gadget);
815
830 return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED); 816 return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
831} 817}
832 818
833int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv) 819int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
834{ 820{
835 struct usbhsg_gpriv *gpriv; 821 struct usbhsg_gpriv *gpriv;
836 struct usbhsg_uep *uep; 822 struct usbhsg_uep *uep;
@@ -876,12 +862,14 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
876 /* 862 /*
877 * init gadget 863 * init gadget
878 */ 864 */
879 device_initialize(&gpriv->gadget.dev);
880 dev_set_name(&gpriv->gadget.dev, "gadget"); 865 dev_set_name(&gpriv->gadget.dev, "gadget");
881 gpriv->gadget.dev.parent = dev; 866 gpriv->gadget.dev.parent = dev;
882 gpriv->gadget.name = "renesas_usbhs_udc"; 867 gpriv->gadget.name = "renesas_usbhs_udc";
883 gpriv->gadget.ops = &usbhsg_gadget_ops; 868 gpriv->gadget.ops = &usbhsg_gadget_ops;
884 gpriv->gadget.is_dualspeed = 1; 869 gpriv->gadget.is_dualspeed = 1;
870 ret = device_register(&gpriv->gadget.dev);
871 if (ret < 0)
872 goto err_add_udc;
885 873
886 INIT_LIST_HEAD(&gpriv->gadget.ep_list); 874 INIT_LIST_HEAD(&gpriv->gadget.ep_list);
887 875
@@ -912,12 +900,15 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
912 900
913 ret = usb_add_gadget_udc(dev, &gpriv->gadget); 901 ret = usb_add_gadget_udc(dev, &gpriv->gadget);
914 if (ret) 902 if (ret)
915 goto err_add_udc; 903 goto err_register;
916 904
917 905
918 dev_info(dev, "gadget probed\n"); 906 dev_info(dev, "gadget probed\n");
919 907
920 return 0; 908 return 0;
909
910err_register:
911 device_unregister(&gpriv->gadget.dev);
921err_add_udc: 912err_add_udc:
922 kfree(gpriv->uep); 913 kfree(gpriv->uep);
923 914
@@ -927,12 +918,14 @@ usbhs_mod_gadget_probe_err_gpriv:
927 return ret; 918 return ret;
928} 919}
929 920
930void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv) 921void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
931{ 922{
932 struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); 923 struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
933 924
934 usb_del_gadget_udc(&gpriv->gadget); 925 usb_del_gadget_udc(&gpriv->gadget);
935 926
927 device_unregister(&gpriv->gadget.dev);
928
936 usbhsg_controller_unregister(gpriv); 929 usbhsg_controller_unregister(gpriv);
937 930
938 kfree(gpriv->uep); 931 kfree(gpriv->uep);
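
The mod_gadget.c rework above moves gadget device registration to probe time: a single device_register() replaces the earlier device_initialize()/device_add() split, with device_unregister() on the later error path and in remove. A rough sketch of that probe-time pattern, with hypothetical parent/child names:

#include <linux/device.h>

static int probe_sketch(struct device *parent, struct device *child)
{
	int ret;

	dev_set_name(child, "gadget");
	child->parent = parent;

	ret = device_register(child);	/* initialize + add in one call */
	if (ret < 0)
		return ret;

	/*
	 * ... further setup; if a later step fails, undo the registration
	 * with device_unregister(child) before returning the error ...
	 */
	return 0;
}
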
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 1a7208a50afc..7955de589951 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -103,7 +103,7 @@ struct usbhsh_hpriv {
103 103
104 u32 port_stat; /* USB_PORT_STAT_xxx */ 104 u32 port_stat; /* USB_PORT_STAT_xxx */
105 105
106 struct completion *done; 106 struct completion setup_ack_done;
107 107
108 /* see usbhsh_req_alloc/free */ 108 /* see usbhsh_req_alloc/free */
109 struct list_head ureq_link_active; 109 struct list_head ureq_link_active;
@@ -355,6 +355,7 @@ static void usbhsh_device_free(struct usbhsh_hpriv *hpriv,
355struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, 355struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
356 struct usbhsh_device *udev, 356 struct usbhsh_device *udev,
357 struct usb_host_endpoint *ep, 357 struct usb_host_endpoint *ep,
358 int dir_in_req,
358 gfp_t mem_flags) 359 gfp_t mem_flags)
359{ 360{
360 struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv); 361 struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
@@ -364,27 +365,38 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
364 struct usbhs_pipe *pipe, *best_pipe; 365 struct usbhs_pipe *pipe, *best_pipe;
365 struct device *dev = usbhsh_hcd_to_dev(hcd); 366 struct device *dev = usbhsh_hcd_to_dev(hcd);
366 struct usb_endpoint_descriptor *desc = &ep->desc; 367 struct usb_endpoint_descriptor *desc = &ep->desc;
367 int type, i; 368 int type, i, dir_in;
368 unsigned int min_usr; 369 unsigned int min_usr;
369 370
371 dir_in_req = !!dir_in_req;
372
370 uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags); 373 uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags);
371 if (!uep) { 374 if (!uep) {
372 dev_err(dev, "usbhsh_ep alloc fail\n"); 375 dev_err(dev, "usbhsh_ep alloc fail\n");
373 return NULL; 376 return NULL;
374 } 377 }
375 type = usb_endpoint_type(desc); 378
379 if (usb_endpoint_xfer_control(desc)) {
380 best_pipe = usbhsh_hpriv_to_dcp(hpriv);
381 goto usbhsh_endpoint_alloc_find_pipe;
382 }
376 383
377 /* 384 /*
378 * find best pipe for endpoint 385 * find best pipe for endpoint
379 * see 386 * see
380 * HARDWARE LIMITATION 387 * HARDWARE LIMITATION
381 */ 388 */
389 type = usb_endpoint_type(desc);
382 min_usr = ~0; 390 min_usr = ~0;
383 best_pipe = NULL; 391 best_pipe = NULL;
384 usbhs_for_each_pipe_with_dcp(pipe, priv, i) { 392 usbhs_for_each_pipe(pipe, priv, i) {
385 if (!usbhs_pipe_type_is(pipe, type)) 393 if (!usbhs_pipe_type_is(pipe, type))
386 continue; 394 continue;
387 395
396 dir_in = !!usbhs_pipe_is_dir_in(pipe);
397 if (0 != (dir_in - dir_in_req))
398 continue;
399
388 info = usbhsh_pipe_info(pipe); 400 info = usbhsh_pipe_info(pipe);
389 401
390 if (min_usr > info->usr_cnt) { 402 if (min_usr > info->usr_cnt) {
@@ -398,7 +410,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
398 kfree(uep); 410 kfree(uep);
399 return NULL; 411 return NULL;
400 } 412 }
401 413usbhsh_endpoint_alloc_find_pipe:
402 /* 414 /*
403 * init uep 415 * init uep
404 */ 416 */
@@ -423,6 +435,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
423 * see 435 * see
424 * DCPMAXP/PIPEMAXP 436 * DCPMAXP/PIPEMAXP
425 */ 437 */
438 usbhs_pipe_sequence_data0(uep->pipe);
426 usbhs_pipe_config_update(uep->pipe, 439 usbhs_pipe_config_update(uep->pipe,
427 usbhsh_device_number(hpriv, udev), 440 usbhsh_device_number(hpriv, udev),
428 usb_endpoint_num(desc), 441 usb_endpoint_num(desc),
@@ -430,7 +443,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
430 443
431 dev_dbg(dev, "%s [%d-%s](%p)\n", __func__, 444 dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
432 usbhsh_device_number(hpriv, udev), 445 usbhsh_device_number(hpriv, udev),
433 usbhs_pipe_name(pipe), uep); 446 usbhs_pipe_name(uep->pipe), uep);
434 447
435 return uep; 448 return uep;
436} 449}
@@ -549,8 +562,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
549 * usbhsh_irq_setup_ack() 562 * usbhsh_irq_setup_ack()
550 * usbhsh_irq_setup_err() 563 * usbhsh_irq_setup_err()
551 */ 564 */
552 DECLARE_COMPLETION(done); 565 init_completion(&hpriv->setup_ack_done);
553 hpriv->done = &done;
554 566
555 /* copy original request */ 567 /* copy original request */
556 memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest)); 568 memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest));
@@ -572,8 +584,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
572 /* 584 /*
573 * wait setup packet ACK 585 * wait setup packet ACK
574 */ 586 */
575 wait_for_completion(&done); 587 wait_for_completion(&hpriv->setup_ack_done);
576 hpriv->done = NULL;
577 588
578 dev_dbg(dev, "%s done\n", __func__); 589 dev_dbg(dev, "%s done\n", __func__);
579} 590}
@@ -724,11 +735,11 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
724 struct usbhsh_device *udev, *new_udev = NULL; 735 struct usbhsh_device *udev, *new_udev = NULL;
725 struct usbhs_pipe *pipe; 736 struct usbhs_pipe *pipe;
726 struct usbhsh_ep *uep; 737 struct usbhsh_ep *uep;
738 int is_dir_in = usb_pipein(urb->pipe);
727 739
728 int ret; 740 int ret;
729 741
730 dev_dbg(dev, "%s (%s)\n", 742 dev_dbg(dev, "%s (%s)\n", __func__, is_dir_in ? "in" : "out");
731 __func__, usb_pipein(urb->pipe) ? "in" : "out");
732 743
733 ret = usb_hcd_link_urb_to_ep(hcd, urb); 744 ret = usb_hcd_link_urb_to_ep(hcd, urb);
734 if (ret) 745 if (ret)
@@ -751,7 +762,8 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
751 */ 762 */
752 uep = usbhsh_ep_to_uep(ep); 763 uep = usbhsh_ep_to_uep(ep);
753 if (!uep) { 764 if (!uep) {
754 uep = usbhsh_endpoint_alloc(hpriv, udev, ep, mem_flags); 765 uep = usbhsh_endpoint_alloc(hpriv, udev, ep,
766 is_dir_in, mem_flags);
755 if (!uep) 767 if (!uep)
756 goto usbhsh_urb_enqueue_error_free_device; 768 goto usbhsh_urb_enqueue_error_free_device;
757 } 769 }
@@ -1095,10 +1107,7 @@ static int usbhsh_irq_setup_ack(struct usbhs_priv *priv,
1095 1107
1096 dev_dbg(dev, "setup packet OK\n"); 1108 dev_dbg(dev, "setup packet OK\n");
1097 1109
1098 if (unlikely(!hpriv->done)) 1110 complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
1099 dev_err(dev, "setup ack happen without necessary data\n");
1100 else
1101 complete(hpriv->done); /* see usbhsh_urb_enqueue() */
1102 1111
1103 return 0; 1112 return 0;
1104} 1113}
@@ -1111,10 +1120,7 @@ static int usbhsh_irq_setup_err(struct usbhs_priv *priv,
1111 1120
1112 dev_dbg(dev, "setup packet Err\n"); 1121 dev_dbg(dev, "setup packet Err\n");
1113 1122
1114 if (unlikely(!hpriv->done)) 1123 complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
1115 dev_err(dev, "setup err happen without necessary data\n");
1116 else
1117 complete(hpriv->done); /* see usbhsh_urb_enqueue() */
1118 1124
1119 return 0; 1125 return 0;
1120} 1126}
@@ -1221,8 +1227,18 @@ static int usbhsh_stop(struct usbhs_priv *priv)
1221{ 1227{
1222 struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv); 1228 struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
1223 struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv); 1229 struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
1230 struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1224 struct device *dev = usbhs_priv_to_dev(priv); 1231 struct device *dev = usbhs_priv_to_dev(priv);
1225 1232
1233 /*
1234 * disable irq callback
1235 */
1236 mod->irq_attch = NULL;
1237 mod->irq_dtch = NULL;
1238 mod->irq_sack = NULL;
1239 mod->irq_sign = NULL;
1240 usbhs_irq_callback_update(priv, mod);
1241
1226 usb_remove_hcd(hcd); 1242 usb_remove_hcd(hcd);
1227 1243
1228 /* disable sys */ 1244 /* disable sys */
@@ -1235,7 +1251,7 @@ static int usbhsh_stop(struct usbhs_priv *priv)
1235 return 0; 1251 return 0;
1236} 1252}
1237 1253
1238int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv) 1254int usbhs_mod_host_probe(struct usbhs_priv *priv)
1239{ 1255{
1240 struct usbhsh_hpriv *hpriv; 1256 struct usbhsh_hpriv *hpriv;
1241 struct usb_hcd *hcd; 1257 struct usb_hcd *hcd;
@@ -1251,6 +1267,7 @@ int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
1251 dev_err(dev, "Failed to create hcd\n"); 1267 dev_err(dev, "Failed to create hcd\n");
1252 return -ENOMEM; 1268 return -ENOMEM;
1253 } 1269 }
1270 hcd->has_tt = 1; /* for low/full speed */
1254 1271
1255 pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL); 1272 pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
1256 if (!pipe_info) { 1273 if (!pipe_info) {
@@ -1279,7 +1296,6 @@ int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
1279 hpriv->mod.stop = usbhsh_stop; 1296 hpriv->mod.stop = usbhsh_stop;
1280 hpriv->pipe_info = pipe_info; 1297 hpriv->pipe_info = pipe_info;
1281 hpriv->pipe_size = pipe_size; 1298 hpriv->pipe_size = pipe_size;
1282 hpriv->done = NULL;
1283 usbhsh_req_list_init(hpriv); 1299 usbhsh_req_list_init(hpriv);
1284 usbhsh_port_stat_init(hpriv); 1300 usbhsh_port_stat_init(hpriv);
1285 1301
@@ -1299,7 +1315,7 @@ usbhs_mod_host_probe_err:
1299 return -ENOMEM; 1315 return -ENOMEM;
1300} 1316}
1301 1317
1302int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv) 1318int usbhs_mod_host_remove(struct usbhs_priv *priv)
1303{ 1319{
1304 struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv); 1320 struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
1305 struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv); 1321 struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
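
The mod_host.c changes above replace an on-stack DECLARE_COMPLETION() plus a 'done' pointer with a completion embedded in hpriv, re-armed with init_completion() before each setup transfer and signalled from the ACK/error IRQ callbacks. A sketch of that embedded-completion pattern, with a hypothetical structure:

#include <linux/completion.h>

struct host_sketch {
	struct completion setup_ack_done;
};

static void submit_and_wait(struct host_sketch *h)
{
	init_completion(&h->setup_ack_done);	/* re-arm before each request */
	/* ... kick off the setup transfer ... */
	wait_for_completion(&h->setup_ack_done);
}

static void setup_irq_ack(struct host_sketch *h)
{
	complete(&h->setup_ack_done);		/* wakes submit_and_wait() */
}
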
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 5cdb9d912275..18e875b92e00 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -42,7 +42,7 @@ static int debug;
42 * Version information 42 * Version information
43 */ 43 */
44 44
45#define DRIVER_VERSION "v0.6" 45#define DRIVER_VERSION "v0.7"
46#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>" 46#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
47#define DRIVER_DESC "USB ARK3116 serial/IrDA driver" 47#define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
48#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA" 48#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
@@ -380,10 +380,6 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
380 goto err_out; 380 goto err_out;
381 } 381 }
382 382
383 /* setup termios */
384 if (tty)
385 ark3116_set_termios(tty, port, NULL);
386
387 /* remove any data still left: also clears error state */ 383 /* remove any data still left: also clears error state */
388 ark3116_read_reg(serial, UART_RX, buf); 384 ark3116_read_reg(serial, UART_RX, buf);
389 385
@@ -406,6 +402,10 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
406 /* enable DMA */ 402 /* enable DMA */
407 ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT); 403 ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT);
408 404
405 /* setup termios */
406 if (tty)
407 ark3116_set_termios(tty, port, NULL);
408
409err_out: 409err_out:
410 kfree(buf); 410 kfree(buf);
411 return result; 411 return result;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8fe034d2d3e7..ff3db5d056a5 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -736,6 +736,7 @@ static struct usb_device_id id_table_combined [] = {
736 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, 736 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
737 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, 737 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
738 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, 738 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
739 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
739 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), 740 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
740 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 741 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
741 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), 742 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
@@ -2104,13 +2105,19 @@ static void ftdi_set_termios(struct tty_struct *tty,
2104 2105
2105 cflag = termios->c_cflag; 2106 cflag = termios->c_cflag;
2106 2107
2107 /* FIXME -For this cut I don't care if the line is really changing or 2108 if (old_termios->c_cflag == termios->c_cflag
2108 not - so just do the change regardless - should be able to 2109 && old_termios->c_ispeed == termios->c_ispeed
2109 compare old_termios and tty->termios */ 2110 && old_termios->c_ospeed == termios->c_ospeed)
2111 goto no_c_cflag_changes;
2112
2110 /* NOTE These routines can get interrupted by 2113 /* NOTE These routines can get interrupted by
2111 ftdi_sio_read_bulk_callback - need to examine what this means - 2114 ftdi_sio_read_bulk_callback - need to examine what this means -
2112 don't see any problems yet */ 2115 don't see any problems yet */
2113 2116
2117 if ((old_termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)) ==
2118 (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)))
2119 goto no_data_parity_stop_changes;
2120
2114 /* Set number of data bits, parity, stop bits */ 2121 /* Set number of data bits, parity, stop bits */
2115 2122
2116 urb_value = 0; 2123 urb_value = 0;
@@ -2151,6 +2158,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2151 } 2158 }
2152 2159
2153 /* Now do the baudrate */ 2160 /* Now do the baudrate */
2161no_data_parity_stop_changes:
2154 if ((cflag & CBAUD) == B0) { 2162 if ((cflag & CBAUD) == B0) {
2155 /* Disable flow control */ 2163 /* Disable flow control */
2156 if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 2164 if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
@@ -2178,6 +2186,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2178 2186
2179 /* Set flow control */ 2187 /* Set flow control */
2180 /* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */ 2188 /* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */
2189no_c_cflag_changes:
2181 if (cflag & CRTSCTS) { 2190 if (cflag & CRTSCTS) {
2182 dbg("%s Setting to CRTSCTS flow control", __func__); 2191 dbg("%s Setting to CRTSCTS flow control", __func__);
2183 if (usb_control_msg(dev, 2192 if (usb_control_msg(dev,
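
The ftdi_sio.c hunks above short-circuit ftdi_set_termios() when neither c_cflag nor the input/output speeds changed, instead of reprogramming the chip unconditionally. The comparison, pulled out into a hypothetical helper:

#include <linux/termios.h>

static int termios_unchanged(const struct ktermios *old_termios,
			     const struct ktermios *termios)
{
	return old_termios->c_cflag  == termios->c_cflag &&
	       old_termios->c_ispeed == termios->c_ispeed &&
	       old_termios->c_ospeed == termios->c_ospeed;
}
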
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 571fa96b49c7..055b64ef0bba 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -112,6 +112,7 @@
112 112
113/* Propox devices */ 113/* Propox devices */
114#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 114#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
115#define FTDI_PROPOX_ISPCABLEIII_PID 0xD739
115 116
116/* Lenz LI-USB Computer Interface. */ 117/* Lenz LI-USB Computer Interface. */
117#define FTDI_LENZ_LIUSB_PID 0xD780 118#define FTDI_LENZ_LIUSB_PID 0xD780
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 89ae1f65e1b1..6dd64534fad0 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -156,6 +156,7 @@ static void option_instat_callback(struct urb *urb);
156#define HUAWEI_PRODUCT_K4511 0x14CC 156#define HUAWEI_PRODUCT_K4511 0x14CC
157#define HUAWEI_PRODUCT_ETS1220 0x1803 157#define HUAWEI_PRODUCT_ETS1220 0x1803
158#define HUAWEI_PRODUCT_E353 0x1506 158#define HUAWEI_PRODUCT_E353 0x1506
159#define HUAWEI_PRODUCT_E173S 0x1C05
159 160
160#define QUANTA_VENDOR_ID 0x0408 161#define QUANTA_VENDOR_ID 0x0408
161#define QUANTA_PRODUCT_Q101 0xEA02 162#define QUANTA_PRODUCT_Q101 0xEA02
@@ -316,6 +317,9 @@ static void option_instat_callback(struct urb *urb);
316#define ZTE_PRODUCT_AC8710 0xfff1 317#define ZTE_PRODUCT_AC8710 0xfff1
317#define ZTE_PRODUCT_AC2726 0xfff5 318#define ZTE_PRODUCT_AC2726 0xfff5
318#define ZTE_PRODUCT_AC8710T 0xffff 319#define ZTE_PRODUCT_AC8710T 0xffff
320#define ZTE_PRODUCT_MC2718 0xffe8
321#define ZTE_PRODUCT_AD3812 0xffeb
322#define ZTE_PRODUCT_MC2716 0xffed
319 323
320#define BENQ_VENDOR_ID 0x04a5 324#define BENQ_VENDOR_ID 0x04a5
321#define BENQ_PRODUCT_H10 0x4068 325#define BENQ_PRODUCT_H10 0x4068
@@ -468,6 +472,10 @@ static void option_instat_callback(struct urb *urb);
468#define YUGA_PRODUCT_CLU528 0x260D 472#define YUGA_PRODUCT_CLU528 0x260D
469#define YUGA_PRODUCT_CLU526 0x260F 473#define YUGA_PRODUCT_CLU526 0x260F
470 474
475/* Viettel products */
476#define VIETTEL_VENDOR_ID 0x2262
477#define VIETTEL_PRODUCT_VT1000 0x0002
478
471/* some devices interfaces need special handling due to a number of reasons */ 479/* some devices interfaces need special handling due to a number of reasons */
472enum option_blacklist_reason { 480enum option_blacklist_reason {
473 OPTION_BLACKLIST_NONE = 0, 481 OPTION_BLACKLIST_NONE = 0,
@@ -500,6 +508,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
500 .reserved = BIT(4), 508 .reserved = BIT(4),
501}; 509};
502 510
511static const struct option_blacklist_info zte_ad3812_z_blacklist = {
512 .sendsetup = BIT(0) | BIT(1) | BIT(2),
513};
514
515static const struct option_blacklist_info zte_mc2718_z_blacklist = {
516 .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
517};
518
519static const struct option_blacklist_info zte_mc2716_z_blacklist = {
520 .sendsetup = BIT(1) | BIT(2) | BIT(3),
521};
522
503static const struct option_blacklist_info huawei_cdc12_blacklist = { 523static const struct option_blacklist_info huawei_cdc12_blacklist = {
504 .reserved = BIT(1) | BIT(2), 524 .reserved = BIT(1) | BIT(2),
505}; 525};
@@ -622,6 +642,7 @@ static const struct usb_device_id option_ids[] = {
622 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, 642 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
623 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, 643 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
624 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, 644 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
645 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
625 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), 646 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
626 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, 647 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
627 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), 648 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
@@ -640,6 +661,14 @@ static const struct usb_device_id option_ids[] = {
640 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) }, 661 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
641 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) }, 662 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
642 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, 663 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
664 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
665 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
666 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
667 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
668 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
669 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
670 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
671 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
643 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, 672 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
644 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, 673 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
645 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, 674 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -726,6 +755,7 @@ static const struct usb_device_id option_ids[] = {
726 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 755 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
727 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ 756 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
728 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 757 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
758 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
729 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ 759 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
730 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, 760 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
731 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, 761 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
@@ -1043,6 +1073,12 @@ static const struct usb_device_id option_ids[] = {
1043 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, 1073 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
1044 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, 1074 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
1045 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, 1075 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
1076 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
1077 .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
1078 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
1079 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
1080 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
1081 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
1046 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 1082 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
1047 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, 1083 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
1048 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ 1084 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
@@ -1141,6 +1177,7 @@ static const struct usb_device_id option_ids[] = {
1141 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, 1177 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
1142 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, 1178 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
1143 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, 1179 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
1180 { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
1144 { } /* Terminating entry */ 1181 { } /* Terminating entry */
1145}; 1182};
1146MODULE_DEVICE_TABLE(usb, option_ids); 1183MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9083d1e616b4..fc2d66f7f4eb 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -91,7 +91,6 @@ static const struct usb_device_id id_table[] = {
91 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, 91 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
92 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, 92 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
93 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, 93 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
94 { USB_DEVICE(WINCHIPHEAD_VENDOR_ID, WINCHIPHEAD_USBSER_PRODUCT_ID) },
95 { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, 94 { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
96 { } /* Terminating entry */ 95 { } /* Terminating entry */
97}; 96};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 3d10d7f02072..c38b8c00c06f 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -145,10 +145,6 @@
145#define ADLINK_VENDOR_ID 0x0b63 145#define ADLINK_VENDOR_ID 0x0b63
146#define ADLINK_ND6530_PRODUCT_ID 0x6530 146#define ADLINK_ND6530_PRODUCT_ID 0x6530
147 147
148/* WinChipHead USB->RS 232 adapter */
149#define WINCHIPHEAD_VENDOR_ID 0x4348
150#define WINCHIPHEAD_USBSER_PRODUCT_ID 0x5523
151
152/* SMART USB Serial Adapter */ 148/* SMART USB Serial Adapter */
153#define SMART_VENDOR_ID 0x0b8c 149#define SMART_VENDOR_ID 0x0b8c
154#define SMART_PRODUCT_ID 0x2303 150#define SMART_PRODUCT_ID 0x2303
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 4dca3ef0668c..9fbe742343c6 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1762,10 +1762,9 @@ static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
1762 result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1); 1762 result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1);
1763 } else { 1763 } else {
1764 void *buf; 1764 void *buf;
1765 int offset; 1765 int offset = 0;
1766 u16 PhyBlockAddr; 1766 u16 PhyBlockAddr;
1767 u8 PageNum; 1767 u8 PageNum;
1768 u32 result;
1769 u16 len, oldphy, newphy; 1768 u16 len, oldphy, newphy;
1770 1769
1771 buf = kmalloc(blenByte, GFP_KERNEL); 1770 buf = kmalloc(blenByte, GFP_KERNEL);
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 93c1a4d86f51..82dd834709c7 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -59,7 +59,9 @@
59 59
60void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us) 60void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
61{ 61{
62 /* Pad the SCSI command with zeros out to 12 bytes 62 /*
63 * Pad the SCSI command with zeros out to 12 bytes. If the
64 * command already is 12 bytes or longer, leave it alone.
63 * 65 *
64 * NOTE: This only works because a scsi_cmnd struct field contains 66 * NOTE: This only works because a scsi_cmnd struct field contains
65 * a unsigned char cmnd[16], so we know we have storage available 67 * a unsigned char cmnd[16], so we know we have storage available
@@ -67,9 +69,6 @@ void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
67 for (; srb->cmd_len<12; srb->cmd_len++) 69 for (; srb->cmd_len<12; srb->cmd_len++)
68 srb->cmnd[srb->cmd_len] = 0; 70 srb->cmnd[srb->cmd_len] = 0;
69 71
70 /* set command length to 12 bytes */
71 srb->cmd_len = 12;
72
73 /* send the command to the transport layer */ 72 /* send the command to the transport layer */
74 usb_stor_invoke_transport(srb, us); 73 usb_stor_invoke_transport(srb, us);
75} 74}
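
The protocol.c change above makes usb_stor_pad12_command() pad-only: commands shorter than 12 bytes are zero-filled up to 12, but longer commands are no longer truncated back to 12. A sketch of that behaviour with hypothetical names (the real scsi_cmnd keeps at least 16 bytes of cmnd[], so padding to 12 is safe):

static void pad_cdb_to_12(unsigned char *cmnd, unsigned int *cmd_len)
{
	for (; *cmd_len < 12; (*cmd_len)++)
		cmnd[*cmd_len] = 0;
	/* commands already 12 bytes or longer are left untouched */
}
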
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 3041a974faf3..24caba79d722 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1854,6 +1854,13 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
1854 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1854 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1855 US_FL_IGNORE_RESIDUE ), 1855 US_FL_IGNORE_RESIDUE ),
1856 1856
1857/* Reported by Qinglin Ye <yestyle@gmail.com> */
1858UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
1859 "Kingston",
1860 "DT 101 G2",
1861 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1862 US_FL_BULK_IGNORE_TAG ),
1863
1857/* Reported by Francesco Foresti <frafore@tiscali.it> */ 1864/* Reported by Francesco Foresti <frafore@tiscali.it> */
1858UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201, 1865UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
1859 "Super Top", 1866 "Super Top",
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 55f91d9ab00b..29577bf1f559 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -116,6 +116,7 @@
116/* Clock registers available only on Version 2 */ 116/* Clock registers available only on Version 2 */
117#define LCD_CLK_ENABLE_REG 0x6c 117#define LCD_CLK_ENABLE_REG 0x6c
118#define LCD_CLK_RESET_REG 0x70 118#define LCD_CLK_RESET_REG 0x70
119#define LCD_CLK_MAIN_RESET BIT(3)
119 120
120#define LCD_NUM_BUFFERS 2 121#define LCD_NUM_BUFFERS 2
121 122
@@ -244,6 +245,10 @@ static inline void lcd_enable_raster(void)
244{ 245{
245 u32 reg; 246 u32 reg;
246 247
248 /* Bring LCDC out of reset */
249 if (lcd_revision == LCD_VERSION_2)
250 lcdc_write(0, LCD_CLK_RESET_REG);
251
247 reg = lcdc_read(LCD_RASTER_CTRL_REG); 252 reg = lcdc_read(LCD_RASTER_CTRL_REG);
248 if (!(reg & LCD_RASTER_ENABLE)) 253 if (!(reg & LCD_RASTER_ENABLE))
249 lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); 254 lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
@@ -257,6 +262,10 @@ static inline void lcd_disable_raster(void)
257 reg = lcdc_read(LCD_RASTER_CTRL_REG); 262 reg = lcdc_read(LCD_RASTER_CTRL_REG);
258 if (reg & LCD_RASTER_ENABLE) 263 if (reg & LCD_RASTER_ENABLE)
259 lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); 264 lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
265
266 if (lcd_revision == LCD_VERSION_2)
267 /* Write 1 to reset LCDC */
268 lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
260} 269}
261 270
262static void lcd_blit(int load_mode, struct da8xx_fb_par *par) 271static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
@@ -584,8 +593,12 @@ static void lcd_reset(struct da8xx_fb_par *par)
584 lcdc_write(0, LCD_DMA_CTRL_REG); 593 lcdc_write(0, LCD_DMA_CTRL_REG);
585 lcdc_write(0, LCD_RASTER_CTRL_REG); 594 lcdc_write(0, LCD_RASTER_CTRL_REG);
586 595
587 if (lcd_revision == LCD_VERSION_2) 596 if (lcd_revision == LCD_VERSION_2) {
588 lcdc_write(0, LCD_INT_ENABLE_SET_REG); 597 lcdc_write(0, LCD_INT_ENABLE_SET_REG);
598 /* Write 1 to reset */
599 lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
600 lcdc_write(0, LCD_CLK_RESET_REG);
601 }
589} 602}
590 603
591static void lcd_calc_clk_divider(struct da8xx_fb_par *par) 604static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 0ccd7adf47bb..6f61e781f15a 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -19,6 +19,7 @@
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */ 20 */
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/module.h>
22#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
23#include <linux/mm.h> 24#include <linux/mm.h>
24#include <linux/vmalloc.h> 25#include <linux/vmalloc.h>
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 3532782551cb..5c81533eacaa 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -1720,12 +1720,11 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
1720 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); 1720 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
1721 unsigned long fclk = 0; 1721 unsigned long fclk = 0;
1722 1722
1723 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { 1723 if (width == out_width && height == out_height)
1724 if (width != out_width || height != out_height) 1724 return 0;
1725 return -EINVAL; 1725
1726 else 1726 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
1727 return 0; 1727 return -EINVAL;
1728 }
1729 1728
1730 if (out_width < width / maxdownscale || 1729 if (out_width < width / maxdownscale ||
1731 out_width > width * 8) 1730 out_width > width * 8)
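
The dispc.c hunk above reorders the checks so a 1:1 pass-through returns early before the scaling-capability test, and only genuinely scaled setups on a non-scaling overlay fail with -EINVAL. A sketch of that control flow, as a hypothetical helper:

#include <linux/errno.h>

static int calc_scaling_sketch(int can_scale, int w, int h, int ow, int oh)
{
	if (w == ow && h == oh)
		return 0;		/* nothing to scale, capability irrelevant */
	if (!can_scale)
		return -EINVAL;
	/* ... compute the functional clock needed by the scaler ... */
	return 0;
}
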
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 3262f0f1fa35..c56378c555b0 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -269,7 +269,7 @@ static void update_hdmi_timings(struct hdmi_config *cfg,
269unsigned long hdmi_get_pixel_clock(void) 269unsigned long hdmi_get_pixel_clock(void)
270{ 270{
271 /* HDMI Pixel Clock in Mhz */ 271 /* HDMI Pixel Clock in Mhz */
272 return hdmi.ip_data.cfg.timings.timings.pixel_clock * 10000; 272 return hdmi.ip_data.cfg.timings.timings.pixel_clock * 1000;
273} 273}
274 274
275static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy, 275static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 69d882cbe709..c01c1c162726 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -559,8 +559,8 @@
559#define M1200X720_R60_VSP POSITIVE 559#define M1200X720_R60_VSP POSITIVE
560 560
561/* 1200x900@60 Sync Polarity (DCON) */ 561/* 1200x900@60 Sync Polarity (DCON) */
562#define M1200X900_R60_HSP NEGATIVE 562#define M1200X900_R60_HSP POSITIVE
563#define M1200X900_R60_VSP NEGATIVE 563#define M1200X900_R60_VSP POSITIVE
564 564
565/* 1280x600@60 Sync Polarity (GTF Mode) */ 565/* 1280x600@60 Sync Polarity (GTF Mode) */
566#define M1280x600_R60_HSP NEGATIVE 566#define M1280x600_R60_HSP NEGATIVE
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 816ed08e7cf3..1a61939b85fc 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -37,7 +37,7 @@ config VIRTIO_BALLOON
37 37
38 config VIRTIO_MMIO 38 config VIRTIO_MMIO
39 tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)" 39 tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
40 depends on EXPERIMENTAL 40 depends on HAS_IOMEM && EXPERIMENTAL
41 select VIRTIO 41 select VIRTIO
42 select VIRTIO_RING 42 select VIRTIO_RING
43 ---help--- 43 ---help---
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index acc5e43c373e..7317dc2ec426 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -118,7 +118,7 @@ static void vm_finalize_features(struct virtio_device *vdev)
118 vring_transport_features(vdev); 118 vring_transport_features(vdev);
119 119
120 for (i = 0; i < ARRAY_SIZE(vdev->features); i++) { 120 for (i = 0; i < ARRAY_SIZE(vdev->features); i++) {
121 writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SET); 121 writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
122 writel(vdev->features[i], 122 writel(vdev->features[i],
123 vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); 123 vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
124 } 124 }
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 79a31e5b4b68..03d1984bd363 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -169,11 +169,29 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
 	iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
 }
 
+/* wait for pending irq handlers */
+static void vp_synchronize_vectors(struct virtio_device *vdev)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	int i;
+
+	if (vp_dev->intx_enabled)
+		synchronize_irq(vp_dev->pci_dev->irq);
+
+	for (i = 0; i < vp_dev->msix_vectors; ++i)
+		synchronize_irq(vp_dev->msix_entries[i].vector);
+}
+
 static void vp_reset(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	/* 0 status means a reset. */
 	iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+	/* Flush out the status write, and flush in device writes,
+	 * including MSi-X interrupts, if any. */
+	ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+	/* Flush pending VQ/configuration callbacks. */
+	vp_synchronize_vectors(vdev);
 }
 
 /* the notify function used when creating a virt queue */
@@ -594,11 +612,11 @@ static struct virtio_config_ops virtio_pci_config_ops = {
 
 static void virtio_pci_release_dev(struct device *_d)
 {
-	struct virtio_device *dev = container_of(_d, struct virtio_device,
-						 dev);
-	struct virtio_pci_device *vp_dev = to_vp_device(dev);
-
-	kfree(vp_dev);
+	/*
+	 * No need for a release method as we allocate/free
+	 * all devices together with the pci devices.
+	 * Provide an empty one to avoid getting a warning from core.
+	 */
 }
 
 /* the PCI probing function */
@@ -686,6 +704,7 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
 	pci_iounmap(pci_dev, vp_dev->ioaddr);
 	pci_release_regions(pci_dev);
 	pci_disable_device(pci_dev);
+	kfree(vp_dev);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 6285867a9356..79fd606b7cd5 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -314,13 +314,6 @@ config NUC900_WATCHDOG
 	  To compile this driver as a module, choose M here: the
 	  module will be called nuc900_wdt.
 
-config ADX_WATCHDOG
-	tristate "Avionic Design Xanthos watchdog"
-	depends on ARCH_PXA_ADX
-	help
-	  Say Y here if you want support for the watchdog timer on Avionic
-	  Design Xanthos boards.
-
 config TS72XX_WATCHDOG
 	tristate "TS-72XX SBC Watchdog"
 	depends on MACH_TS72XX
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 55bd5740e910..fe893e91935b 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -51,7 +51,6 @@ obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
 obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
 obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
 obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
-obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o
 obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
 obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
 
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c
deleted file mode 100644
index af6e6b16475a..000000000000
--- a/drivers/watchdog/adx_wdt.c
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Avionic Design GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/fs.h>
-#include <linux/gfp.h>
-#include <linux/io.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/watchdog.h>
-
-#define WATCHDOG_NAME "adx-wdt"
-
-/* register offsets */
-#define ADX_WDT_CONTROL		0x00
-#define ADX_WDT_CONTROL_ENABLE	(1 << 0)
-#define ADX_WDT_CONTROL_nRESET	(1 << 1)
-#define ADX_WDT_TIMEOUT		0x08
-
-static struct platform_device *adx_wdt_dev;
-static unsigned long driver_open;
-
-#define WDT_STATE_STOP	0
-#define WDT_STATE_START	1
-
-struct adx_wdt {
-	void __iomem *base;
-	unsigned long timeout;
-	unsigned int state;
-	unsigned int wake;
-	spinlock_t lock;
-};
-
-static const struct watchdog_info adx_wdt_info = {
-	.identity = "Avionic Design Xanthos Watchdog",
-	.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
-};
-
-static void adx_wdt_start_locked(struct adx_wdt *wdt)
-{
-	u32 ctrl;
-
-	ctrl = readl(wdt->base + ADX_WDT_CONTROL);
-	ctrl |= ADX_WDT_CONTROL_ENABLE;
-	writel(ctrl, wdt->base + ADX_WDT_CONTROL);
-	wdt->state = WDT_STATE_START;
-}
-
-static void adx_wdt_start(struct adx_wdt *wdt)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&wdt->lock, flags);
-	adx_wdt_start_locked(wdt);
-	spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static void adx_wdt_stop_locked(struct adx_wdt *wdt)
-{
-	u32 ctrl;
-
-	ctrl = readl(wdt->base + ADX_WDT_CONTROL);
-	ctrl &= ~ADX_WDT_CONTROL_ENABLE;
-	writel(ctrl, wdt->base + ADX_WDT_CONTROL);
-	wdt->state = WDT_STATE_STOP;
-}
-
-static void adx_wdt_stop(struct adx_wdt *wdt)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&wdt->lock, flags);
-	adx_wdt_stop_locked(wdt);
-	spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static void adx_wdt_set_timeout(struct adx_wdt *wdt, unsigned long seconds)
-{
-	unsigned long timeout = seconds * 1000;
-	unsigned long flags;
-	unsigned int state;
-
-	spin_lock_irqsave(&wdt->lock, flags);
-	state = wdt->state;
-	adx_wdt_stop_locked(wdt);
-	writel(timeout, wdt->base + ADX_WDT_TIMEOUT);
-
-	if (state == WDT_STATE_START)
-		adx_wdt_start_locked(wdt);
-
-	wdt->timeout = timeout;
-	spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static void adx_wdt_get_timeout(struct adx_wdt *wdt, unsigned long *seconds)
-{
-	*seconds = wdt->timeout / 1000;
-}
-
-static void adx_wdt_keepalive(struct adx_wdt *wdt)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&wdt->lock, flags);
-	writel(wdt->timeout, wdt->base + ADX_WDT_TIMEOUT);
-	spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static int adx_wdt_open(struct inode *inode, struct file *file)
-{
-	struct adx_wdt *wdt = platform_get_drvdata(adx_wdt_dev);
-
-	if (test_and_set_bit(0, &driver_open))
-		return -EBUSY;
-
-	file->private_data = wdt;
-	adx_wdt_set_timeout(wdt, 30);
-	adx_wdt_start(wdt);
-
-	return nonseekable_open(inode, file);
-}
-
-static int adx_wdt_release(struct inode *inode, struct file *file)
-{
-	struct adx_wdt *wdt = file->private_data;
-
-	adx_wdt_stop(wdt);
-	clear_bit(0, &driver_open);
-
-	return 0;
-}
-
-static long adx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	struct adx_wdt *wdt = file->private_data;
-	void __user *argp = (void __user *)arg;
-	unsigned long __user *p = argp;
-	unsigned long seconds = 0;
-	unsigned int options;
-	long ret = -EINVAL;
-
-	switch (cmd) {
-	case WDIOC_GETSUPPORT:
-		if (copy_to_user(argp, &adx_wdt_info, sizeof(adx_wdt_info)))
-			return -EFAULT;
-		else
-			return 0;
-
-	case WDIOC_GETSTATUS:
-	case WDIOC_GETBOOTSTATUS:
-		return put_user(0, p);
-
-	case WDIOC_KEEPALIVE:
-		adx_wdt_keepalive(wdt);
-		return 0;
-
-	case WDIOC_SETTIMEOUT:
-		if (get_user(seconds, p))
-			return -EFAULT;
-
-		adx_wdt_set_timeout(wdt, seconds);
-
-		/* fallthrough */
-	case WDIOC_GETTIMEOUT:
-		adx_wdt_get_timeout(wdt, &seconds);
-		return put_user(seconds, p);
-
-	case WDIOC_SETOPTIONS:
-		if (copy_from_user(&options, argp, sizeof(options)))
-			return -EFAULT;
-
-		if (options & WDIOS_DISABLECARD) {
-			adx_wdt_stop(wdt);
-			ret = 0;
-		}
-
-		if (options & WDIOS_ENABLECARD) {
-			adx_wdt_start(wdt);
-			ret = 0;
-		}
-
-		return ret;
-
-	default:
-		break;
-	}
-
-	return -ENOTTY;
-}
-
-static ssize_t adx_wdt_write(struct file *file, const char __user *data,
-		size_t len, loff_t *ppos)
-{
-	struct adx_wdt *wdt = file->private_data;
-
-	if (len)
-		adx_wdt_keepalive(wdt);
-
-	return len;
-}
-
-static const struct file_operations adx_wdt_fops = {
-	.owner = THIS_MODULE,
-	.llseek = no_llseek,
-	.open = adx_wdt_open,
-	.release = adx_wdt_release,
-	.unlocked_ioctl = adx_wdt_ioctl,
-	.write = adx_wdt_write,
-};
-
-static struct miscdevice adx_wdt_miscdev = {
-	.minor = WATCHDOG_MINOR,
-	.name = "watchdog",
-	.fops = &adx_wdt_fops,
-};
-
-static int __devinit adx_wdt_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	struct adx_wdt *wdt;
-	int ret = 0;
-	u32 ctrl;
-
-	wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
-	if (!wdt) {
-		dev_err(&pdev->dev, "cannot allocate WDT structure\n");
-		return -ENOMEM;
-	}
-
-	spin_lock_init(&wdt->lock);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "cannot obtain I/O memory region\n");
-		return -ENXIO;
-	}
-
-	res = devm_request_mem_region(&pdev->dev, res->start,
-			resource_size(res), res->name);
-	if (!res) {
-		dev_err(&pdev->dev, "cannot request I/O memory region\n");
-		return -ENXIO;
-	}
-
-	wdt->base = devm_ioremap_nocache(&pdev->dev, res->start,
-			resource_size(res));
-	if (!wdt->base) {
-		dev_err(&pdev->dev, "cannot remap I/O memory region\n");
-		return -ENXIO;
-	}
-
-	/* disable watchdog and reboot on timeout */
-	ctrl = readl(wdt->base + ADX_WDT_CONTROL);
-	ctrl &= ~ADX_WDT_CONTROL_ENABLE;
-	ctrl &= ~ADX_WDT_CONTROL_nRESET;
-	writel(ctrl, wdt->base + ADX_WDT_CONTROL);
-
-	platform_set_drvdata(pdev, wdt);
-	adx_wdt_dev = pdev;
-
-	ret = misc_register(&adx_wdt_miscdev);
-	if (ret) {
-		dev_err(&pdev->dev, "cannot register miscdev on minor %d "
-				"(err=%d)\n", WATCHDOG_MINOR, ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-static int __devexit adx_wdt_remove(struct platform_device *pdev)
-{
-	struct adx_wdt *wdt = platform_get_drvdata(pdev);
-
-	misc_deregister(&adx_wdt_miscdev);
-	adx_wdt_stop(wdt);
-	platform_set_drvdata(pdev, NULL);
-
-	return 0;
-}
-
-static void adx_wdt_shutdown(struct platform_device *pdev)
-{
-	struct adx_wdt *wdt = platform_get_drvdata(pdev);
-	adx_wdt_stop(wdt);
-}
-
-#ifdef CONFIG_PM
-static int adx_wdt_suspend(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct adx_wdt *wdt = platform_get_drvdata(pdev);
-
-	wdt->wake = (wdt->state == WDT_STATE_START) ? 1 : 0;
-	adx_wdt_stop(wdt);
-
-	return 0;
-}
-
-static int adx_wdt_resume(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct adx_wdt *wdt = platform_get_drvdata(pdev);
-
-	if (wdt->wake)
-		adx_wdt_start(wdt);
-
-	return 0;
-}
-
-static const struct dev_pm_ops adx_wdt_pm_ops = {
-	.suspend = adx_wdt_suspend,
-	.resume = adx_wdt_resume,
-};
-
-# define ADX_WDT_PM_OPS	(&adx_wdt_pm_ops)
-#else
-# define ADX_WDT_PM_OPS	NULL
-#endif
-
-static struct platform_driver adx_wdt_driver = {
-	.probe = adx_wdt_probe,
-	.remove = __devexit_p(adx_wdt_remove),
-	.shutdown = adx_wdt_shutdown,
-	.driver = {
-		.name = WATCHDOG_NAME,
-		.owner = THIS_MODULE,
-		.pm = ADX_WDT_PM_OPS,
-	},
-};
-
-static int __init adx_wdt_init(void)
-{
-	return platform_driver_register(&adx_wdt_driver);
-}
-
-static void __exit adx_wdt_exit(void)
-{
-	platform_driver_unregister(&adx_wdt_driver);
-}
-
-module_init(adx_wdt_init);
-module_exit(adx_wdt_exit);
-
-MODULE_DESCRIPTION("Avionic Design Xanthos Watchdog Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 5de7e4fa5b8a..a79e3840782a 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -401,8 +401,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
 
 	dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n",
 		 (wtcon & S3C2410_WTCON_ENABLE) ? "" : "in",
-		 (wtcon & S3C2410_WTCON_RSTEN) ? "" : "dis",
-		 (wtcon & S3C2410_WTCON_INTEN) ? "" : "en");
+		 (wtcon & S3C2410_WTCON_RSTEN) ? "en" : "dis",
+		 (wtcon & S3C2410_WTCON_INTEN) ? "en" : "dis");
 
 	return 0;
 
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 7be38556aed0..e789a47db41f 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -150,7 +150,7 @@ static int wm831x_wdt_set_timeout(struct watchdog_device *wdt_dev,
 		if (wm831x_wdt_cfgs[i].time == timeout)
 			break;
 	if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
-		ret = -EINVAL;
+		return -EINVAL;
 
 	ret = wm831x_reg_unlock(wm831x);
 	if (ret == 0) {
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index a767884a6c7a..31ab82fda38a 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -501,7 +501,7 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target);
  * alloc_xenballooned_pages - get pages that have been ballooned out
  * @nr_pages: Number of pages to get
  * @pages: pages returned
- * @highmem: highmem or lowmem pages
+ * @highmem: allow highmem pages
  * @return 0 on success, error otherwise
  */
 int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
@@ -511,7 +511,7 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
 	mutex_lock(&balloon_mutex);
 	while (pgno < nr_pages) {
 		page = balloon_retrieve(highmem);
-		if (page && PageHighMem(page) == highmem) {
+		if (page && (highmem || !PageHighMem(page))) {
 			pages[pgno++] = page;
 		} else {
 			enum bp_state st;
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index f6832f46aea4..e1c4c6e5b469 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -135,7 +135,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
 		/* Grant foreign access to the page. */
 		gref->gref_id = gnttab_grant_foreign_access(op->domid,
 			pfn_to_mfn(page_to_pfn(gref->page)), readonly);
-		if (gref->gref_id < 0) {
+		if ((int)gref->gref_id < 0) {
 			rc = gref->gref_id;
 			goto undo;
 		}
@@ -280,7 +280,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
 		goto out;
 	}
 
-	gref_ids = kzalloc(sizeof(gref_ids[0]) * op.count, GFP_TEMPORARY);
+	gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_TEMPORARY);
 	if (!gref_ids) {
 		rc = -ENOMEM;
 		goto out;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 39871326afa2..afca14d9042e 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -114,11 +114,11 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
 	if (NULL == add)
 		return NULL;
 
-	add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL);
-	add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL);
-	add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
-	add->kmap_ops = kzalloc(sizeof(add->kmap_ops[0]) * count, GFP_KERNEL);
-	add->pages = kzalloc(sizeof(add->pages[0]) * count, GFP_KERNEL);
+	add->grants = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
+	add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
+	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
+	add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
+	add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
 	if (NULL == add->grants ||
 	    NULL == add->map_ops ||
 	    NULL == add->unmap_ops ||
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 4864e5d72e72..19e6a2041371 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -166,7 +166,7 @@ retry:
 	/*
 	 * Get IO TLB memory from any location.
 	 */
-	xen_io_tlb_start = alloc_bootmem(bytes);
+	xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
 	if (!xen_io_tlb_start) {
 		m = "Cannot allocate Xen-SWIOTLB buffer!\n";
 		goto error;
@@ -179,7 +179,7 @@ retry:
 			       bytes,
 			       xen_io_tlb_nslabs);
 	if (rc) {
-		free_bootmem(__pa(xen_io_tlb_start), bytes);
+		free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
 		m = "Failed to get contiguous memory for DMA from Xen!\n"\
 		    "You either: don't have the permissions, do not have"\
 		    " enough free memory under 4GB, or the hypervisor memory"\
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 81c3ce6b8bbe..1906125eab49 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -35,6 +35,7 @@
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <asm/xen/hypervisor.h>
+#include <asm/xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
 #include <xen/events.h>
@@ -436,19 +437,20 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
 int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
 {
 	struct gnttab_map_grant_ref op = {
-		.flags = GNTMAP_host_map,
+		.flags = GNTMAP_host_map | GNTMAP_contains_pte,
 		.ref = gnt_ref,
 		.dom = dev->otherend_id,
 	};
 	struct vm_struct *area;
+	pte_t *pte;
 
 	*vaddr = NULL;
 
-	area = alloc_vm_area(PAGE_SIZE);
+	area = alloc_vm_area(PAGE_SIZE, &pte);
 	if (!area)
 		return -ENOMEM;
 
-	op.host_addr = (unsigned long)area->addr;
+	op.host_addr = arbitrary_virt_to_machine(pte).maddr;
 
 	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
 		BUG();
@@ -527,6 +529,7 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 	struct gnttab_unmap_grant_ref op = {
 		.host_addr = (unsigned long)vaddr,
 	};
+	unsigned int level;
 
 	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
 	 * method so that we don't have to muck with vmalloc internals here.
@@ -548,6 +551,8 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 	}
 
 	op.handle = (grant_handle_t)area->phys_addr;
+	op.host_addr = arbitrary_virt_to_machine(
+		lookup_address((unsigned long)vaddr, &level)).maddr;
 
 	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
 		BUG();