author    Grant Likely <grant.likely@secretlab.ca>    2011-02-28 03:36:21 -0500
committer Grant Likely <grant.likely@secretlab.ca>    2011-02-28 03:36:21 -0500
commit    38a5d6736e7f714cc56d58692001e66dcbb98799 (patch)
tree      337d32375e10b04642013710c2c424514474b32c /drivers
parent    0bfd95a2a104dfc2469d68de52df99939371c0d4 (diff)
parent    f5412be599602124d2bdd49947b231dd77c0bf99 (diff)
Merge commit 'v2.6.38-rc6' into devicetree/next
Conflicts:
	drivers/spi/pxa2xx_spi_pci.c
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpica/evxfgpe.c49
-rw-r--r--drivers/acpi/osl.c25
-rw-r--r--drivers/acpi/video_detect.c5
-rw-r--r--drivers/acpi/wakeup.c6
-rw-r--r--drivers/atm/solos-pci.c5
-rw-r--r--drivers/block/Makefile2
-rw-r--r--drivers/block/aoe/Makefile2
-rw-r--r--drivers/block/cciss.c2
-rw-r--r--drivers/block/loop.c3
-rw-r--r--drivers/block/nbd.c3
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c3
-rw-r--r--drivers/cdrom/cdrom.c3
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/Kconfig2
-rw-r--r--drivers/char/agp/amd-k7-agp.c19
-rw-r--r--drivers/char/agp/intel-agp.c27
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c12
-rw-r--r--drivers/char/virtio_console.c (renamed from drivers/tty/hvc/virtio_console.c)20
-rw-r--r--drivers/dma/amba-pl08x.c53
-rw-r--r--drivers/dma/imx-dma.c26
-rw-r--r--drivers/dma/imx-sdma.c88
-rw-r--r--drivers/dma/ipu/ipu_idmac.c50
-rw-r--r--drivers/edac/amd64_edac.c28
-rw-r--r--drivers/firmware/dmi_scan.c11
-rw-r--r--drivers/gpio/pca953x.c28
-rw-r--r--drivers/gpu/drm/drm_crtc.c20
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c33
-rw-r--r--drivers/gpu/drm/drm_info.c9
-rw-r--r--drivers/gpu/drm/drm_irq.c7
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c15
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c8
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h14
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c10
-rw-r--r--drivers/gpu/drm/i915/intel_display.c106
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c36
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c39
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c12
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c37
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c26
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c112
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c46
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c96
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c61
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c33
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.c8
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h3
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c5
-rw-r--r--drivers/gpu/drm/radeon/r100.c63
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h13
-rw-r--r--drivers/gpu/drm/radeon/r200.c18
-rw-r--r--drivers/gpu/drm/radeon/r300.c44
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r600.c30
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c11
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c29
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c31
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c46
-rw-r--r--drivers/gpu/drm/radeon/r600d.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon.h11
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c107
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c58
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c142
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h24
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r300 6
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r420 7
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rs600 6
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv515 7
-rw-r--r--drivers/gpu/drm/radeon/rs690.c12
-rw-r--r--drivers/gpu/drm/radeon/rv770.c29
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h8
-rw-r--r--drivers/gpu/stub/Kconfig2
-rw-r--r--drivers/hwmon/Kconfig19
-rw-r--r--drivers/hwmon/emc1403.c2
-rw-r--r--drivers/hwmon/jc42.c35
-rw-r--r--drivers/hwmon/k10temp.c5
-rw-r--r--drivers/hwmon/lm63.c59
-rw-r--r--drivers/hwmon/lm85.c23
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/ucma.c22
-rw-r--r--drivers/infiniband/hw/amso1100/c2_vq.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c32
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c30
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c5
-rw-r--r--drivers/input/input.c37
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c6
-rw-r--r--drivers/input/misc/rotary_encoder.c4
-rw-r--r--drivers/input/serio/serio.c11
-rw-r--r--drivers/input/tablet/wacom_sys.c2
-rw-r--r--drivers/input/touchscreen/ads7846.c38
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c13
-rw-r--r--drivers/isdn/hisax/isdnl2.c28
-rw-r--r--drivers/isdn/hysdn/hysdn_defs.h2
-rw-r--r--drivers/isdn/hysdn/hysdn_init.c26
-rw-r--r--drivers/isdn/hysdn/hysdn_net.c3
-rw-r--r--drivers/isdn/hysdn/hysdn_procconf.c3
-rw-r--r--drivers/isdn/icn/icn.c3
-rw-r--r--drivers/md/md.c47
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid0.c40
-rw-r--r--drivers/md/raid10.c6
-rw-r--r--drivers/md/raid5.c60
-rw-r--r--drivers/media/rc/ir-lirc-codec.c6
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c6
-rw-r--r--drivers/media/rc/mceusb.c9
-rw-r--r--drivers/media/rc/nuvoton-cir.c6
-rw-r--r--drivers/media/rc/streamzap.c14
-rw-r--r--drivers/media/video/gspca/zc3xx.c31
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c24
-rw-r--r--drivers/media/video/hdpvr/hdpvr-i2c.c30
-rw-r--r--drivers/media/video/hdpvr/hdpvr.h3
-rw-r--r--drivers/media/video/ir-kbd-i2c.c13
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.c1
-rw-r--r--drivers/media/video/saa7115.c2
-rw-r--r--drivers/memstick/core/memstick.c2
-rw-r--r--drivers/message/fusion/mptbase.h4
-rw-r--r--drivers/message/fusion/mptctl.c8
-rw-r--r--drivers/message/fusion/mptscsih.c7
-rw-r--r--drivers/misc/tifm_core.c2
-rw-r--r--drivers/misc/vmw_balloon.c2
-rw-r--r--drivers/mtd/nand/r852.c2
-rw-r--r--drivers/mtd/sm_ftl.c2
-rw-r--r--drivers/net/atl1c/atl1c_main.c1
-rw-r--r--drivers/net/benet/be_cmds.c5
-rw-r--r--drivers/net/benet/be_main.c4
-rw-r--r--drivers/net/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c65
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c59
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/mcp251x.c2
-rw-r--r--drivers/net/can/mscan/Kconfig2
-rw-r--r--drivers/net/can/pch_can.c5
-rw-r--r--drivers/net/can/softing/Kconfig4
-rw-r--r--drivers/net/can/softing/softing_cs.c1
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c80
-rw-r--r--drivers/net/cxgb4vf/t4vf_hw.c2
-rw-r--r--drivers/net/depca.c6
-rw-r--r--drivers/net/dl2k.c4
-rw-r--r--drivers/net/e1000/e1000_hw.c4
-rw-r--r--drivers/net/e1000/e1000_hw.h1
-rw-r--r--drivers/net/e1000e/netdev.c53
-rw-r--r--drivers/net/enc28j60.c2
-rw-r--r--drivers/net/forcedeth.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c3
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c53
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c22
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c6
-rw-r--r--drivers/net/mlx4/main.c15
-rw-r--r--drivers/net/niu.c61
-rw-r--r--drivers/net/pch_gbe/pch_gbe.h2
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c116
-rw-r--r--drivers/net/pcmcia/axnet_cs.c6
-rw-r--r--drivers/net/r8169.c43
-rw-r--r--drivers/net/sis900.c1
-rw-r--r--drivers/net/stmmac/stmmac_main.c4
-rw-r--r--drivers/net/tg3.c8
-rw-r--r--drivers/net/usb/cdc_ncm.c227
-rw-r--r--drivers/net/usb/hso.c12
-rw-r--r--drivers/net/usb/usbnet.c4
-rw-r--r--drivers/net/virtio_net.c27
-rw-r--r--drivers/net/vxge/vxge-config.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c7
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c67
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c6
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c40
-rw-r--r--drivers/net/wireless/wl1251/main.c3
-rw-r--r--drivers/net/wireless/wl12xx/spi.c3
-rw-r--r--drivers/net/xen-netfront.c96
-rw-r--r--drivers/pci/pci-sysfs.c3
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/acer-wmi.c4
-rw-r--r--drivers/platform/x86/asus_acpi.c8
-rw-r--r--drivers/platform/x86/dell-laptop.c24
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c116
-rw-r--r--drivers/platform/x86/tc1100-wmi.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c8
-rw-r--r--drivers/rtc/Kconfig12
-rw-r--r--drivers/rtc/class.c1
-rw-r--r--drivers/rtc/interface.c26
-rw-r--r--drivers/rtc/rtc-at32ap700x.c19
-rw-r--r--drivers/rtc/rtc-at91rm9200.c20
-rw-r--r--drivers/rtc/rtc-at91sam9.c20
-rw-r--r--drivers/rtc/rtc-bfin.c21
-rw-r--r--drivers/rtc/rtc-dev.c125
-rw-r--r--drivers/rtc/rtc-ds1286.c41
-rw-r--r--drivers/rtc/rtc-ds1305.c43
-rw-r--r--drivers/rtc/rtc-ds1307.c49
-rw-r--r--drivers/rtc/rtc-ds1374.c37
-rw-r--r--drivers/rtc/rtc-m41t80.c30
-rw-r--r--drivers/rtc/rtc-m48t59.c21
-rw-r--r--drivers/rtc/rtc-mrst.c31
-rw-r--r--drivers/rtc/rtc-msm6242.c2
-rw-r--r--drivers/rtc/rtc-mv.c20
-rw-r--r--drivers/rtc/rtc-omap.c28
-rw-r--r--drivers/rtc/rtc-proc.c6
-rw-r--r--drivers/rtc/rtc-rp5c01.c2
-rw-r--r--drivers/rtc/rtc-rs5c372.c48
-rw-r--r--drivers/rtc/rtc-sa1100.c22
-rw-r--r--drivers/rtc/rtc-sh.c11
-rw-r--r--drivers/rtc/rtc-test.c21
-rw-r--r--drivers/rtc/rtc-vr41xx.c38
-rw-r--r--drivers/s390/block/dasd_alias.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/net/netiucv.c2
-rw-r--r--drivers/s390/net/qeth_core_main.c149
-rw-r--r--drivers/s390/net/qeth_l2_main.c4
-rw-r--r--drivers/s390/net/smsgiucv.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h11
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c114
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c19
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c64
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c10
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/spi/pxa2xx_spi_pci.c61
-rw-r--r--drivers/spi/spi_sh_msiof.c6
-rw-r--r--drivers/ssb/pcmcia.c2
-rw-r--r--drivers/staging/brcm80211/sys/wl_mac80211.c12
-rw-r--r--drivers/staging/brcm80211/sys/wlc_mac80211.c13
-rw-r--r--drivers/staging/brcm80211/sys/wlc_pub.h2
-rw-r--r--drivers/staging/comedi/Kconfig5
-rw-r--r--drivers/staging/comedi/drivers/mite.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c4
-rw-r--r--drivers/staging/hv/netvsc_drv.c1
-rw-r--r--drivers/staging/intel_sst/intelmid_v2_control.c5
-rw-r--r--drivers/staging/lirc/lirc_zilog.c32
-rw-r--r--drivers/staging/zram/zram_drv.c4
-rw-r--r--drivers/target/Makefile3
-rw-r--r--drivers/target/target_core_configfs.c155
-rw-r--r--drivers/target/target_core_device.c13
-rw-r--r--drivers/target/target_core_fabric_configfs.c92
-rw-r--r--drivers/target/target_core_iblock.c8
-rw-r--r--drivers/target/target_core_mib.c1078
-rw-r--r--drivers/target/target_core_mib.h28
-rw-r--r--drivers/target/target_core_pscsi.c4
-rw-r--r--drivers/target/target_core_tpg.c29
-rw-r--r--drivers/target/target_core_transport.c48
-rw-r--r--drivers/tty/hvc/Makefile1
-rw-r--r--drivers/tty/n_gsm.c1
-rw-r--r--drivers/tty/serial/68328serial.c29
-rw-r--r--drivers/tty/serial/68360serial.c1
-rw-r--r--drivers/tty/serial/bfin_5xx.c15
-rw-r--r--drivers/tty/serial/max3100.c2
-rw-r--r--drivers/tty/serial/max3107.c2
-rw-r--r--drivers/tty/sysrq.c17
-rw-r--r--drivers/usb/class/cdc-acm.c1
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/core/hub.c5
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/f_mass_storage.c3
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c2
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ehci-au1xxx.c2
-rw-r--r--drivers/usb/host/ehci-hub.c7
-rw-r--r--drivers/usb/host/ehci-omap.c6
-rw-r--r--drivers/usb/host/ehci-pci.c2
-rw-r--r--drivers/usb/host/sl811-hcd.c1
-rw-r--r--drivers/usb/musb/blackfin.c1
-rw-r--r--drivers/usb/musb/musb_core.c10
-rw-r--r--drivers/usb/musb/musb_core.h12
-rw-r--r--drivers/usb/musb/musb_dma.h3
-rw-r--r--drivers/usb/musb/musb_gadget.c71
-rw-r--r--drivers/usb/musb/musb_gadget.h8
-rw-r--r--drivers/usb/musb/musb_host.c11
-rw-r--r--drivers/usb/musb/musbhsdma.h19
-rw-r--r--drivers/usb/otg/Kconfig2
-rw-r--r--drivers/usb/serial/ftdi_sio.c27
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h12
-rw-r--r--drivers/usb/serial/io_edgeport.c4
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h14
-rw-r--r--drivers/vhost/net.c9
-rw-r--r--drivers/vhost/vhost.h6
-rw-r--r--drivers/w1/masters/omap_hdq.c28
-rw-r--r--drivers/watchdog/Kconfig6
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/m54xx_wdt.c (renamed from drivers/watchdog/m548x_wdt.c)50
-rw-r--r--drivers/xen/manage.c10
321 files changed, 3864 insertions, 3474 deletions
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index e9562a7cb2f9..3b20a3401b64 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -212,37 +212,40 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
212 return_ACPI_STATUS(AE_BAD_PARAMETER); 212 return_ACPI_STATUS(AE_BAD_PARAMETER);
213 } 213 }
214 214
215 /* Validate wake_device is of type Device */
216
217 device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
218 if (device_node->type != ACPI_TYPE_DEVICE) {
219 return_ACPI_STATUS(AE_BAD_PARAMETER);
220 }
221
222 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 215 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
223 216
224 /* Ensure that we have a valid GPE number */ 217 /* Ensure that we have a valid GPE number */
225 218
226 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 219 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
227 if (gpe_event_info) { 220 if (!gpe_event_info) {
228 /* 221 goto unlock_and_exit;
229 * If there is no method or handler for this GPE, then the 222 }
230 * wake_device will be notified whenever this GPE fires (aka 223
231 * "implicit notify") Note: The GPE is assumed to be 224 /*
232 * level-triggered (for windows compatibility). 225 * If there is no method or handler for this GPE, then the
233 */ 226 * wake_device will be notified whenever this GPE fires (aka
234 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 227 * "implicit notify") Note: The GPE is assumed to be
235 ACPI_GPE_DISPATCH_NONE) { 228 * level-triggered (for windows compatibility).
236 gpe_event_info->flags = 229 */
237 (ACPI_GPE_DISPATCH_NOTIFY | 230 if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
238 ACPI_GPE_LEVEL_TRIGGERED); 231 ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) {
239 gpe_event_info->dispatch.device_node = device_node;
240 }
241 232
242 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; 233 /* Validate wake_device is of type Device */
243 status = AE_OK; 234
235 device_node = ACPI_CAST_PTR(struct acpi_namespace_node,
236 wake_device);
237 if (device_node->type != ACPI_TYPE_DEVICE) {
238 goto unlock_and_exit;
239 }
240 gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
241 ACPI_GPE_LEVEL_TRIGGERED);
242 gpe_event_info->dispatch.device_node = device_node;
244 } 243 }
245 244
245 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
246 status = AE_OK;
247
248 unlock_and_exit:
246 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 249 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
247 return_ACPI_STATUS(status); 250 return_ACPI_STATUS(status);
248} 251}
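
Note: the acpi_setup_gpe_for_wake() rework above routes every failure path through a single unlock_and_exit label so acpi_gbl_gpe_lock is always released on the way out. A minimal sketch of that acquire/validate/goto-unwind shape follows; the lock, struct event_info, and EVENT_CAN_WAKE flag are hypothetical stand-ins, not ACPICA names.

    #include <linux/spinlock.h>

    /* Hypothetical stand-ins for the ACPICA objects used above. */
    struct event_info {
    	unsigned int flags;
    };
    #define EVENT_CAN_WAKE 0x01

    static DEFINE_SPINLOCK(example_lock);	/* plays the role of acpi_gbl_gpe_lock */

    static int setup_for_wake(struct event_info *info, bool device_is_valid)
    {
    	unsigned long flags;
    	int status = -EINVAL;

    	spin_lock_irqsave(&example_lock, flags);

    	if (!info)			/* no event registered: leave via the label */
    		goto unlock_and_exit;

    	if (!device_is_valid)		/* validation failed: same single exit path */
    		goto unlock_and_exit;

    	info->flags |= EVENT_CAN_WAKE;	/* all checks passed: commit the change */
    	status = 0;

    unlock_and_exit:
    	spin_unlock_irqrestore(&example_lock, flags);
    	return status;
    }
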
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index b0931818cf98..c90c76aa7f8b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -636,17 +636,21 @@ EXPORT_SYMBOL(acpi_os_write_port);
636acpi_status 636acpi_status
637acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) 637acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
638{ 638{
639 u32 dummy;
640 void __iomem *virt_addr; 639 void __iomem *virt_addr;
641 int size = width / 8, unmap = 0; 640 unsigned int size = width / 8;
641 bool unmap = false;
642 u32 dummy;
642 643
643 rcu_read_lock(); 644 rcu_read_lock();
644 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 645 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
645 rcu_read_unlock();
646 if (!virt_addr) { 646 if (!virt_addr) {
647 rcu_read_unlock();
647 virt_addr = acpi_os_ioremap(phys_addr, size); 648 virt_addr = acpi_os_ioremap(phys_addr, size);
648 unmap = 1; 649 if (!virt_addr)
650 return AE_BAD_ADDRESS;
651 unmap = true;
649 } 652 }
653
650 if (!value) 654 if (!value)
651 value = &dummy; 655 value = &dummy;
652 656
@@ -666,6 +670,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
666 670
667 if (unmap) 671 if (unmap)
668 iounmap(virt_addr); 672 iounmap(virt_addr);
673 else
674 rcu_read_unlock();
669 675
670 return AE_OK; 676 return AE_OK;
671} 677}
@@ -674,14 +680,17 @@ acpi_status
674acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) 680acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
675{ 681{
676 void __iomem *virt_addr; 682 void __iomem *virt_addr;
677 int size = width / 8, unmap = 0; 683 unsigned int size = width / 8;
684 bool unmap = false;
678 685
679 rcu_read_lock(); 686 rcu_read_lock();
680 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 687 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
681 rcu_read_unlock();
682 if (!virt_addr) { 688 if (!virt_addr) {
689 rcu_read_unlock();
683 virt_addr = acpi_os_ioremap(phys_addr, size); 690 virt_addr = acpi_os_ioremap(phys_addr, size);
684 unmap = 1; 691 if (!virt_addr)
692 return AE_BAD_ADDRESS;
693 unmap = true;
685 } 694 }
686 695
687 switch (width) { 696 switch (width) {
@@ -700,6 +709,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
700 709
701 if (unmap) 710 if (unmap)
702 iounmap(virt_addr); 711 iounmap(virt_addr);
712 else
713 rcu_read_unlock();
703 714
704 return AE_OK; 715 return AE_OK;
705} 716}
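
Note: the acpi_os_read_memory()/acpi_os_write_memory() change above keeps the RCU read-side lock held for as long as a cached mapping is in use, drops it only on the ioremap fallback path, and mirrors that choice at the end (iounmap the private mapping, otherwise rcu_read_unlock). A minimal sketch of the same pattern, assuming a hypothetical cached_map_lookup() in the role of acpi_map_vaddr_lookup():

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/rcupdate.h>
    #include <linux/types.h>

    /* Hypothetical lookup into an RCU-protected cache of existing mappings. */
    void __iomem *cached_map_lookup(phys_addr_t addr, unsigned int size);

    static int read_mapped_u32(phys_addr_t addr, u32 *value)
    {
    	void __iomem *virt;
    	bool unmap = false;

    	rcu_read_lock();
    	virt = cached_map_lookup(addr, sizeof(u32));
    	if (!virt) {
    		/* Nothing cached: leave the RCU section and map it privately. */
    		rcu_read_unlock();
    		virt = ioremap(addr, sizeof(u32));
    		if (!virt)
    			return -ENOMEM;
    		unmap = true;
    	}

    	*value = readl(virt);

    	if (unmap)
    		iounmap(virt);		/* we own this mapping */
    	else
    		rcu_read_unlock();	/* cached mapping: end the RCU section now */

    	return 0;
    }
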
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 42d3d72dae85..5af3479714f6 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -82,6 +82,11 @@ long acpi_is_video_device(struct acpi_device *device)
82 if (!device) 82 if (!device)
83 return 0; 83 return 0;
84 84
85 /* Is this device able to support video switching ? */
86 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
87 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
88 video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
89
85 /* Is this device able to retrieve a video ROM ? */ 90 /* Is this device able to retrieve a video ROM ? */
86 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) 91 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
87 video_caps |= ACPI_VIDEO_ROM_AVAILABLE; 92 video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index ed6501452507..7bfbe40bc43b 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -86,8 +86,12 @@ int __init acpi_wakeup_device_init(void)
86 struct acpi_device *dev = container_of(node, 86 struct acpi_device *dev = container_of(node,
87 struct acpi_device, 87 struct acpi_device,
88 wakeup_list); 88 wakeup_list);
89 if (device_can_wakeup(&dev->dev)) 89 if (device_can_wakeup(&dev->dev)) {
90 /* Button GPEs are supposed to be always enabled. */
91 acpi_enable_gpe(dev->wakeup.gpe_device,
92 dev->wakeup.gpe_number);
90 device_set_wakeup_enable(&dev->dev, true); 93 device_set_wakeup_enable(&dev->dev, true);
94 }
91 } 95 }
92 mutex_unlock(&acpi_device_lock); 96 mutex_unlock(&acpi_device_lock);
93 return 0; 97 return 0;
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 73fb1c4f4cd4..25ef1a4556e6 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -866,8 +866,9 @@ static int popen(struct atm_vcc *vcc)
866 } 866 }
867 867
868 skb = alloc_skb(sizeof(*header), GFP_ATOMIC); 868 skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
869 if (!skb && net_ratelimit()) { 869 if (!skb) {
870 dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); 870 if (net_ratelimit())
871 dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
871 return -ENOMEM; 872 return -ENOMEM;
872 } 873 }
873 header = (void *)skb_put(skb, sizeof(*header)); 874 header = (void *)skb_put(skb, sizeof(*header));
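
Note: the popen() fix above is about error-handling order. The old code only returned -ENOMEM when net_ratelimit() happened to allow the warning, so a throttled message also skipped the error return; the fix rate-limits only the log line. A small sketch of that shape, using a hypothetical alloc_header_skb() helper:

    #include <linux/device.h>
    #include <linux/gfp.h>
    #include <linux/net.h>	/* net_ratelimit() */
    #include <linux/skbuff.h>

    static struct sk_buff *alloc_header_skb(struct device *dev, unsigned int len)
    {
    	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

    	if (!skb) {
    		if (net_ratelimit())	/* throttle only the message ... */
    			dev_warn(dev, "header skb allocation failed\n");
    		return NULL;		/* ... never the failure handling itself */
    	}
    	return skb;
    }
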
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index d7f463d6312d..40528ba56d1b 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -39,4 +39,4 @@ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
39obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ 39obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
40obj-$(CONFIG_BLK_DEV_RBD) += rbd.o 40obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
41 41
42swim_mod-objs := swim.o swim_asm.o 42swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/aoe/Makefile b/drivers/block/aoe/Makefile
index e76d997183c6..06ea82cdf27d 100644
--- a/drivers/block/aoe/Makefile
+++ b/drivers/block/aoe/Makefile
@@ -3,4 +3,4 @@
3# 3#
4 4
5obj-$(CONFIG_ATA_OVER_ETH) += aoe.o 5obj-$(CONFIG_ATA_OVER_ETH) += aoe.o
6aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o 6aoe-y := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 516d5bbec2b6..9279272b3732 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2833,7 +2833,7 @@ static int cciss_revalidate(struct gendisk *disk)
2833 sector_t total_size; 2833 sector_t total_size;
2834 InquiryData_struct *inq_buff = NULL; 2834 InquiryData_struct *inq_buff = NULL;
2835 2835
2836 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { 2836 for (logvol = 0; logvol <= h->highest_lun; logvol++) {
2837 if (!h->drv[logvol]) 2837 if (!h->drv[logvol])
2838 continue; 2838 continue;
2839 if (memcmp(h->drv[logvol]->LunID, drv->LunID, 2839 if (memcmp(h->drv[logvol]->LunID, drv->LunID,
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 44e18c073c44..49e6a545eb63 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1641,6 +1641,9 @@ out:
1641 1641
1642static void loop_free(struct loop_device *lo) 1642static void loop_free(struct loop_device *lo)
1643{ 1643{
1644 if (!lo->lo_queue->queue_lock)
1645 lo->lo_queue->queue_lock = &lo->lo_queue->__queue_lock;
1646
1644 blk_cleanup_queue(lo->lo_queue); 1647 blk_cleanup_queue(lo->lo_queue);
1645 put_disk(lo->lo_disk); 1648 put_disk(lo->lo_disk);
1646 list_del(&lo->lo_list); 1649 list_del(&lo->lo_list);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a32fb41246f8..e6fc716aca45 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -53,7 +53,6 @@
53#define DBG_BLKDEV 0x0100 53#define DBG_BLKDEV 0x0100
54#define DBG_RX 0x0200 54#define DBG_RX 0x0200
55#define DBG_TX 0x0400 55#define DBG_TX 0x0400
56static DEFINE_MUTEX(nbd_mutex);
57static unsigned int debugflags; 56static unsigned int debugflags;
58#endif /* NDEBUG */ 57#endif /* NDEBUG */
59 58
@@ -718,11 +717,9 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
718 dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", 717 dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
719 lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); 718 lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
720 719
721 mutex_lock(&nbd_mutex);
722 mutex_lock(&lo->tx_lock); 720 mutex_lock(&lo->tx_lock);
723 error = __nbd_ioctl(bdev, lo, cmd, arg); 721 error = __nbd_ioctl(bdev, lo, cmd, arg);
724 mutex_unlock(&lo->tx_lock); 722 mutex_unlock(&lo->tx_lock);
725 mutex_unlock(&nbd_mutex);
726 723
727 return error; 724 return error;
728} 725}
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a126e614601f..333c21289d97 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -39,6 +39,8 @@ static struct usb_device_id ath3k_table[] = {
39 /* Atheros AR3011 with sflash firmware*/ 39 /* Atheros AR3011 with sflash firmware*/
40 { USB_DEVICE(0x0CF3, 0x3002) }, 40 { USB_DEVICE(0x0CF3, 0x3002) },
41 41
42 /* Atheros AR9285 Malbec with sflash firmware */
43 { USB_DEVICE(0x03F0, 0x311D) },
42 { } /* Terminating entry */ 44 { } /* Terminating entry */
43}; 45};
44 46
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 1da773f899a2..4cefa91e6c34 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,9 @@ static struct usb_device_id blacklist_table[] = {
102 /* Atheros 3011 with sflash firmware */ 102 /* Atheros 3011 with sflash firmware */
103 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, 103 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
104 104
105 /* Atheros AR9285 Malbec with sflash firmware */
106 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
107
105 /* Broadcom BCM2035 */ 108 /* Broadcom BCM2035 */
106 { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, 109 { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
107 { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, 110 { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 14033a36bcd0..e2c48a7eccff 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -409,7 +409,8 @@ int register_cdrom(struct cdrom_device_info *cdi)
409 } 409 }
410 410
411 ENSURE(drive_status, CDC_DRIVE_STATUS ); 411 ENSURE(drive_status, CDC_DRIVE_STATUS );
412 ENSURE(media_changed, CDC_MEDIA_CHANGED); 412 if (cdo->check_events == NULL && cdo->media_changed == NULL)
413 *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
413 ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); 414 ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
414 ENSURE(lock_door, CDC_LOCK); 415 ENSURE(lock_door, CDC_LOCK);
415 ENSURE(select_speed, CDC_SELECT_SPEED); 416 ENSURE(select_speed, CDC_SELECT_SPEED);
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 5bc765d4c3ca..8238f89f73c9 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
30obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o 30obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
31obj-$(CONFIG_SX) += sx.o generic_serial.o 31obj-$(CONFIG_SX) += sx.o generic_serial.o
32obj-$(CONFIG_RIO) += rio/ generic_serial.o 32obj-$(CONFIG_RIO) += rio/ generic_serial.o
33obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
33obj-$(CONFIG_RAW_DRIVER) += raw.o 34obj-$(CONFIG_RAW_DRIVER) += raw.o
34obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o 35obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
35obj-$(CONFIG_MSPEC) += mspec.o 36obj-$(CONFIG_MSPEC) += mspec.o
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index fcd867d923ba..d8b1b576556c 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -50,7 +50,7 @@ config AGP_ATI
50 50
51config AGP_AMD 51config AGP_AMD
52 tristate "AMD Irongate, 761, and 762 chipset support" 52 tristate "AMD Irongate, 761, and 762 chipset support"
53 depends on AGP && (X86_32 || ALPHA) 53 depends on AGP && X86_32
54 help 54 help
55 This option gives you AGP support for the GLX component of 55 This option gives you AGP support for the GLX component of
56 X on AMD Irongate, 761, and 762 chipsets. 56 X on AMD Irongate, 761, and 762 chipsets.
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index b1b4362bc648..45681c0ff3b6 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -41,22 +41,8 @@ static int amd_create_page_map(struct amd_page_map *page_map)
41 if (page_map->real == NULL) 41 if (page_map->real == NULL)
42 return -ENOMEM; 42 return -ENOMEM;
43 43
44#ifndef CONFIG_X86
45 SetPageReserved(virt_to_page(page_map->real));
46 global_cache_flush();
47 page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
48 PAGE_SIZE);
49 if (page_map->remapped == NULL) {
50 ClearPageReserved(virt_to_page(page_map->real));
51 free_page((unsigned long) page_map->real);
52 page_map->real = NULL;
53 return -ENOMEM;
54 }
55 global_cache_flush();
56#else
57 set_memory_uc((unsigned long)page_map->real, 1); 44 set_memory_uc((unsigned long)page_map->real, 1);
58 page_map->remapped = page_map->real; 45 page_map->remapped = page_map->real;
59#endif
60 46
61 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { 47 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
62 writel(agp_bridge->scratch_page, page_map->remapped+i); 48 writel(agp_bridge->scratch_page, page_map->remapped+i);
@@ -68,12 +54,7 @@ static int amd_create_page_map(struct amd_page_map *page_map)
68 54
69static void amd_free_page_map(struct amd_page_map *page_map) 55static void amd_free_page_map(struct amd_page_map *page_map)
70{ 56{
71#ifndef CONFIG_X86
72 iounmap(page_map->remapped);
73 ClearPageReserved(virt_to_page(page_map->real));
74#else
75 set_memory_wb((unsigned long)page_map->real, 1); 57 set_memory_wb((unsigned long)page_map->real, 1);
76#endif
77 free_page((unsigned long) page_map->real); 58 free_page((unsigned long) page_map->real);
78} 59}
79 60
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 857df10c0428..b0a0dccc98c1 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -774,20 +774,14 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
774 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); 774 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
775 775
776 /* 776 /*
777 * If the device has not been properly setup, the following will catch
778 * the problem and should stop the system from crashing.
779 * 20030610 - hamish@zot.org
780 */
781 if (pci_enable_device(pdev)) {
782 dev_err(&pdev->dev, "can't enable PCI device\n");
783 agp_put_bridge(bridge);
784 return -ENODEV;
785 }
786
787 /*
788 * The following fixes the case where the BIOS has "forgotten" to 777 * The following fixes the case where the BIOS has "forgotten" to
789 * provide an address range for the GART. 778 * provide an address range for the GART.
790 * 20030610 - hamish@zot.org 779 * 20030610 - hamish@zot.org
780 * This happens before pci_enable_device() intentionally;
781 * calling pci_enable_device() before assigning the resource
782 * will result in the GART being disabled on machines with such
783 * BIOSs (the GART ends up with a BAR starting at 0, which
784 * conflicts a lot of other devices).
791 */ 785 */
792 r = &pdev->resource[0]; 786 r = &pdev->resource[0];
793 if (!r->start && r->end) { 787 if (!r->start && r->end) {
@@ -798,6 +792,17 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
798 } 792 }
799 } 793 }
800 794
795 /*
796 * If the device has not been properly setup, the following will catch
797 * the problem and should stop the system from crashing.
798 * 20030610 - hamish@zot.org
799 */
800 if (pci_enable_device(pdev)) {
801 dev_err(&pdev->dev, "can't enable PCI device\n");
802 agp_put_bridge(bridge);
803 return -ENODEV;
804 }
805
801 /* Fill in the mode register */ 806 /* Fill in the mode register */
802 if (cap_ptr) { 807 if (cap_ptr) {
803 pci_read_config_dword(pdev, 808 pci_read_config_dword(pdev,
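
Note: the intel-agp reordering above is about probe sequencing — assign the aperture BAR that the BIOS left at zero before calling pci_enable_device(), because enabling the device with an unassigned BAR leaves the GART decoding address 0 and colliding with other devices. A sketch of that ordering in a hypothetical probe routine (not the intel-agp code itself):

    #include <linux/pci.h>

    static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
    	struct resource *r = &pdev->resource[0];

    	/* BIOS "forgot" to assign the aperture BAR: fix it up first. */
    	if (!r->start && r->end) {
    		if (pci_assign_resource(pdev, 0)) {
    			dev_err(&pdev->dev, "could not assign aperture resource\n");
    			return -ENODEV;
    		}
    	}

    	/* Only enable the device once its resources are sane. */
    	if (pci_enable_device(pdev)) {
    		dev_err(&pdev->dev, "can't enable PCI device\n");
    		return -ENODEV;
    	}

    	return 0;
    }
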
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index b6ae6e9a9c5f..7855f9f45b8e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -320,6 +320,7 @@ static int unload_when_empty = 1;
320static int add_smi(struct smi_info *smi); 320static int add_smi(struct smi_info *smi);
321static int try_smi_init(struct smi_info *smi); 321static int try_smi_init(struct smi_info *smi);
322static void cleanup_one_si(struct smi_info *to_clean); 322static void cleanup_one_si(struct smi_info *to_clean);
323static void cleanup_ipmi_si(void);
323 324
324static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); 325static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
325static int register_xaction_notifier(struct notifier_block *nb) 326static int register_xaction_notifier(struct notifier_block *nb)
@@ -3450,16 +3451,7 @@ static int __devinit init_ipmi_si(void)
3450 mutex_lock(&smi_infos_lock); 3451 mutex_lock(&smi_infos_lock);
3451 if (unload_when_empty && list_empty(&smi_infos)) { 3452 if (unload_when_empty && list_empty(&smi_infos)) {
3452 mutex_unlock(&smi_infos_lock); 3453 mutex_unlock(&smi_infos_lock);
3453#ifdef CONFIG_PCI 3454 cleanup_ipmi_si();
3454 if (pci_registered)
3455 pci_unregister_driver(&ipmi_pci_driver);
3456#endif
3457
3458#ifdef CONFIG_PPC_OF
3459 if (of_registered)
3460 of_unregister_platform_driver(&ipmi_of_platform_driver);
3461#endif
3462 driver_unregister(&ipmi_driver.driver);
3463 printk(KERN_WARNING PFX 3455 printk(KERN_WARNING PFX
3464 "Unable to find any System Interface(s)\n"); 3456 "Unable to find any System Interface(s)\n");
3465 return -ENODEV; 3457 return -ENODEV;
diff --git a/drivers/tty/hvc/virtio_console.c b/drivers/char/virtio_console.c
index 896a2ced1d27..490393186338 100644
--- a/drivers/tty/hvc/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation 2 * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
3 * Copyright (C) 2009, 2010 Red Hat, Inc. 3 * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
4 * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -31,7 +32,7 @@
31#include <linux/virtio_console.h> 32#include <linux/virtio_console.h>
32#include <linux/wait.h> 33#include <linux/wait.h>
33#include <linux/workqueue.h> 34#include <linux/workqueue.h>
34#include "hvc_console.h" 35#include "../tty/hvc/hvc_console.h"
35 36
36/* 37/*
37 * This is a global struct for storing common data for all the devices 38 * This is a global struct for storing common data for all the devices
@@ -1462,6 +1463,17 @@ static void control_work_handler(struct work_struct *work)
1462 spin_unlock(&portdev->cvq_lock); 1463 spin_unlock(&portdev->cvq_lock);
1463} 1464}
1464 1465
1466static void out_intr(struct virtqueue *vq)
1467{
1468 struct port *port;
1469
1470 port = find_port_by_vq(vq->vdev->priv, vq);
1471 if (!port)
1472 return;
1473
1474 wake_up_interruptible(&port->waitqueue);
1475}
1476
1465static void in_intr(struct virtqueue *vq) 1477static void in_intr(struct virtqueue *vq)
1466{ 1478{
1467 struct port *port; 1479 struct port *port;
@@ -1566,7 +1578,7 @@ static int init_vqs(struct ports_device *portdev)
1566 */ 1578 */
1567 j = 0; 1579 j = 0;
1568 io_callbacks[j] = in_intr; 1580 io_callbacks[j] = in_intr;
1569 io_callbacks[j + 1] = NULL; 1581 io_callbacks[j + 1] = out_intr;
1570 io_names[j] = "input"; 1582 io_names[j] = "input";
1571 io_names[j + 1] = "output"; 1583 io_names[j + 1] = "output";
1572 j += 2; 1584 j += 2;
@@ -1580,7 +1592,7 @@ static int init_vqs(struct ports_device *portdev)
1580 for (i = 1; i < nr_ports; i++) { 1592 for (i = 1; i < nr_ports; i++) {
1581 j += 2; 1593 j += 2;
1582 io_callbacks[j] = in_intr; 1594 io_callbacks[j] = in_intr;
1583 io_callbacks[j + 1] = NULL; 1595 io_callbacks[j + 1] = out_intr;
1584 io_names[j] = "input"; 1596 io_names[j] = "input";
1585 io_names[j + 1] = "output"; 1597 io_names[j + 1] = "output";
1586 } 1598 }
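
Note: the out_intr() callback added above exists to wake writers when the device consumes an output buffer; previously the output virtqueues had no callback at all. A generic sketch of that wake-up pairing, with a hypothetical example_port standing in for the driver's struct port:

    #include <linux/wait.h>

    struct example_port {
    	wait_queue_head_t waitqueue;
    	bool buf_consumed;	/* set when the device returns an output buffer */
    };

    static void example_port_init(struct example_port *port)
    {
    	init_waitqueue_head(&port->waitqueue);
    	port->buf_consumed = false;
    }

    /* The role out_intr() plays: called when the device has used a buffer. */
    static void example_out_intr(struct example_port *port)
    {
    	port->buf_consumed = true;
    	wake_up_interruptible(&port->waitqueue);
    }

    /* Writer side: sleep until the device has drained something. */
    static int example_wait_for_space(struct example_port *port)
    {
    	return wait_event_interruptible(port->waitqueue, port->buf_consumed);
    }
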
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 297f48b0cba9..07bca4970e50 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -79,6 +79,7 @@
79#include <linux/module.h> 79#include <linux/module.h>
80#include <linux/interrupt.h> 80#include <linux/interrupt.h>
81#include <linux/slab.h> 81#include <linux/slab.h>
82#include <linux/delay.h>
82#include <linux/dmapool.h> 83#include <linux/dmapool.h>
83#include <linux/dmaengine.h> 84#include <linux/dmaengine.h>
84#include <linux/amba/bus.h> 85#include <linux/amba/bus.h>
@@ -235,16 +236,19 @@ static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
235} 236}
236 237
237/* 238/*
238 * Overall DMAC remains enabled always. 239 * Pause the channel by setting the HALT bit.
239 * 240 *
240 * Disabling individual channels could lose data. 241 * For M->P transfers, pause the DMAC first and then stop the peripheral -
242 * the FIFO can only drain if the peripheral is still requesting data.
243 * (note: this can still timeout if the DMAC FIFO never drains of data.)
241 * 244 *
242 * Disable the peripheral DMA after disabling the DMAC in order to allow 245 * For P->M transfers, disable the peripheral first to stop it filling
243 * the DMAC FIFO to drain, and hence allow the channel to show inactive 246 * the DMAC FIFO, and then pause the DMAC.
244 */ 247 */
245static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 248static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
246{ 249{
247 u32 val; 250 u32 val;
251 int timeout;
248 252
249 /* Set the HALT bit and wait for the FIFO to drain */ 253 /* Set the HALT bit and wait for the FIFO to drain */
250 val = readl(ch->base + PL080_CH_CONFIG); 254 val = readl(ch->base + PL080_CH_CONFIG);
@@ -252,8 +256,13 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
252 writel(val, ch->base + PL080_CH_CONFIG); 256 writel(val, ch->base + PL080_CH_CONFIG);
253 257
254 /* Wait for channel inactive */ 258 /* Wait for channel inactive */
255 while (pl08x_phy_channel_busy(ch)) 259 for (timeout = 1000; timeout; timeout--) {
256 cpu_relax(); 260 if (!pl08x_phy_channel_busy(ch))
261 break;
262 udelay(1);
263 }
264 if (pl08x_phy_channel_busy(ch))
265 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
257} 266}
258 267
259static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 268static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -267,19 +276,24 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
267} 276}
268 277
269 278
270/* Stops the channel */ 279/*
271static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch) 280 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
281 * clears any pending interrupt status. This should not be used for
282 * an on-going transfer, but as a method of shutting down a channel
283 * (eg, when it's no longer used) or terminating a transfer.
284 */
285static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
286 struct pl08x_phy_chan *ch)
272{ 287{
273 u32 val; 288 u32 val = readl(ch->base + PL080_CH_CONFIG);
274 289
275 pl08x_pause_phy_chan(ch); 290 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
291 PL080_CONFIG_TC_IRQ_MASK);
276 292
277 /* Disable channel */
278 val = readl(ch->base + PL080_CH_CONFIG);
279 val &= ~PL080_CONFIG_ENABLE;
280 val &= ~PL080_CONFIG_ERR_IRQ_MASK;
281 val &= ~PL080_CONFIG_TC_IRQ_MASK;
282 writel(val, ch->base + PL080_CH_CONFIG); 293 writel(val, ch->base + PL080_CH_CONFIG);
294
295 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
296 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
283} 297}
284 298
285static inline u32 get_bytes_in_cctl(u32 cctl) 299static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -404,13 +418,12 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
404{ 418{
405 unsigned long flags; 419 unsigned long flags;
406 420
421 spin_lock_irqsave(&ch->lock, flags);
422
407 /* Stop the channel and clear its interrupts */ 423 /* Stop the channel and clear its interrupts */
408 pl08x_stop_phy_chan(ch); 424 pl08x_terminate_phy_chan(pl08x, ch);
409 writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
410 writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
411 425
412 /* Mark it as free */ 426 /* Mark it as free */
413 spin_lock_irqsave(&ch->lock, flags);
414 ch->serving = NULL; 427 ch->serving = NULL;
415 spin_unlock_irqrestore(&ch->lock, flags); 428 spin_unlock_irqrestore(&ch->lock, flags);
416} 429}
@@ -1449,7 +1462,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1449 plchan->state = PL08X_CHAN_IDLE; 1462 plchan->state = PL08X_CHAN_IDLE;
1450 1463
1451 if (plchan->phychan) { 1464 if (plchan->phychan) {
1452 pl08x_stop_phy_chan(plchan->phychan); 1465 pl08x_terminate_phy_chan(pl08x, plchan->phychan);
1453 1466
1454 /* 1467 /*
1455 * Mark physical channel as free and free any slave 1468 * Mark physical channel as free and free any slave
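
Note: the pl08x pause path above replaces an unbounded cpu_relax() loop with a bounded poll — up to 1000 iterations of udelay(1), then an error message — since the DMAC FIFO may never drain. The same shape as a standalone helper, with a hypothetical status register and busy bit:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>

    #define EXAMPLE_CH_BUSY	0x1	/* hypothetical "channel active" bit */

    /* Poll for roughly a millisecond instead of spinning forever. */
    static int wait_channel_idle(void __iomem *status_reg)
    {
    	int timeout;

    	for (timeout = 1000; timeout; timeout--) {
    		if (!(readl(status_reg) & EXAMPLE_CH_BUSY))
    			return 0;
    		udelay(1);
    	}
    	return -ETIMEDOUT;
    }
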
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index e53d438142bb..e18eaabe92b9 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -49,6 +49,7 @@ struct imxdma_channel {
49 49
50struct imxdma_engine { 50struct imxdma_engine {
51 struct device *dev; 51 struct device *dev;
52 struct device_dma_parameters dma_parms;
52 struct dma_device dma_device; 53 struct dma_device dma_device;
53 struct imxdma_channel channel[MAX_DMA_CHANNELS]; 54 struct imxdma_channel channel[MAX_DMA_CHANNELS];
54}; 55};
@@ -242,6 +243,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
242 else 243 else
243 dmamode = DMA_MODE_WRITE; 244 dmamode = DMA_MODE_WRITE;
244 245
246 switch (imxdmac->word_size) {
247 case DMA_SLAVE_BUSWIDTH_4_BYTES:
248 if (sgl->length & 3 || sgl->dma_address & 3)
249 return NULL;
250 break;
251 case DMA_SLAVE_BUSWIDTH_2_BYTES:
252 if (sgl->length & 1 || sgl->dma_address & 1)
253 return NULL;
254 break;
255 case DMA_SLAVE_BUSWIDTH_1_BYTE:
256 break;
257 default:
258 return NULL;
259 }
260
245 ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, 261 ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
246 dma_length, imxdmac->per_address, dmamode); 262 dma_length, imxdmac->per_address, dmamode);
247 if (ret) 263 if (ret)
@@ -329,6 +345,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
329 345
330 INIT_LIST_HEAD(&imxdma->dma_device.channels); 346 INIT_LIST_HEAD(&imxdma->dma_device.channels);
331 347
348 dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
349 dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
350
332 /* Initialize channel parameters */ 351 /* Initialize channel parameters */
333 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 352 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
334 struct imxdma_channel *imxdmac = &imxdma->channel[i]; 353 struct imxdma_channel *imxdmac = &imxdma->channel[i];
@@ -346,11 +365,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
346 imxdmac->imxdma = imxdma; 365 imxdmac->imxdma = imxdma;
347 spin_lock_init(&imxdmac->lock); 366 spin_lock_init(&imxdmac->lock);
348 367
349 dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
350 dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
351
352 imxdmac->chan.device = &imxdma->dma_device; 368 imxdmac->chan.device = &imxdma->dma_device;
353 imxdmac->chan.chan_id = i;
354 imxdmac->channel = i; 369 imxdmac->channel = i;
355 370
356 /* Add the channel to the DMAC list */ 371 /* Add the channel to the DMAC list */
@@ -370,6 +385,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
370 385
371 platform_set_drvdata(pdev, imxdma); 386 platform_set_drvdata(pdev, imxdma);
372 387
388 imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
389 dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
390
373 ret = dma_async_device_register(&imxdma->dma_device); 391 ret = dma_async_device_register(&imxdma->dma_device);
374 if (ret) { 392 if (ret) {
375 dev_err(&pdev->dev, "unable to register\n"); 393 dev_err(&pdev->dev, "unable to register\n");
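
Note: the imxdma_prep_slave_sg() change above rejects scatterlist entries whose length or DMA address is not aligned to the configured bus width rather than handing them to the hardware. A sketch of that check as a helper, assuming the entry has already been DMA-mapped:

    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>

    static bool sg_entry_aligned(struct scatterlist *sg,
    			     enum dma_slave_buswidth width)
    {
    	switch (width) {
    	case DMA_SLAVE_BUSWIDTH_4_BYTES:
    		return !(sg_dma_len(sg) & 3) && !(sg_dma_address(sg) & 3);
    	case DMA_SLAVE_BUSWIDTH_2_BYTES:
    		return !(sg_dma_len(sg) & 1) && !(sg_dma_address(sg) & 1);
    	case DMA_SLAVE_BUSWIDTH_1_BYTE:
    		return true;	/* byte transfers have no alignment requirement */
    	default:
    		return false;	/* unsupported or undefined bus width */
    	}
    }
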
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d5a5d4d9c19b..b6d1455fa936 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -230,7 +230,7 @@ struct sdma_engine;
230 * struct sdma_channel - housekeeping for a SDMA channel 230 * struct sdma_channel - housekeeping for a SDMA channel
231 * 231 *
232 * @sdma pointer to the SDMA engine for this channel 232 * @sdma pointer to the SDMA engine for this channel
233 * @channel the channel number, matches dmaengine chan_id 233 * @channel the channel number, matches dmaengine chan_id + 1
234 * @direction transfer type. Needed for setting SDMA script 234 * @direction transfer type. Needed for setting SDMA script
235 * @peripheral_type Peripheral type. Needed for setting SDMA script 235 * @peripheral_type Peripheral type. Needed for setting SDMA script
236 * @event_id0 aka dma request line 236 * @event_id0 aka dma request line
@@ -301,6 +301,7 @@ struct sdma_firmware_header {
301 301
302struct sdma_engine { 302struct sdma_engine {
303 struct device *dev; 303 struct device *dev;
304 struct device_dma_parameters dma_parms;
304 struct sdma_channel channel[MAX_DMA_CHANNELS]; 305 struct sdma_channel channel[MAX_DMA_CHANNELS];
305 struct sdma_channel_control *channel_control; 306 struct sdma_channel_control *channel_control;
306 void __iomem *regs; 307 void __iomem *regs;
@@ -449,7 +450,7 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
449 if (bd->mode.status & BD_RROR) 450 if (bd->mode.status & BD_RROR)
450 sdmac->status = DMA_ERROR; 451 sdmac->status = DMA_ERROR;
451 else 452 else
452 sdmac->status = DMA_SUCCESS; 453 sdmac->status = DMA_IN_PROGRESS;
453 454
454 bd->mode.status |= BD_DONE; 455 bd->mode.status |= BD_DONE;
455 sdmac->buf_tail++; 456 sdmac->buf_tail++;
@@ -770,15 +771,15 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
770 __raw_writel(1 << channel, sdma->regs + SDMA_H_START); 771 __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
771} 772}
772 773
773static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma) 774static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
774{ 775{
775 dma_cookie_t cookie = sdma->chan.cookie; 776 dma_cookie_t cookie = sdmac->chan.cookie;
776 777
777 if (++cookie < 0) 778 if (++cookie < 0)
778 cookie = 1; 779 cookie = 1;
779 780
780 sdma->chan.cookie = cookie; 781 sdmac->chan.cookie = cookie;
781 sdma->desc.cookie = cookie; 782 sdmac->desc.cookie = cookie;
782 783
783 return cookie; 784 return cookie;
784} 785}
@@ -798,7 +799,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
798 799
799 cookie = sdma_assign_cookie(sdmac); 800 cookie = sdma_assign_cookie(sdmac);
800 801
801 sdma_enable_channel(sdma, tx->chan->chan_id); 802 sdma_enable_channel(sdma, sdmac->channel);
802 803
803 spin_unlock_irq(&sdmac->lock); 804 spin_unlock_irq(&sdmac->lock);
804 805
@@ -811,10 +812,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
811 struct imx_dma_data *data = chan->private; 812 struct imx_dma_data *data = chan->private;
812 int prio, ret; 813 int prio, ret;
813 814
814 /* No need to execute this for internal channel 0 */
815 if (chan->chan_id == 0)
816 return 0;
817
818 if (!data) 815 if (!data)
819 return -EINVAL; 816 return -EINVAL;
820 817
@@ -879,7 +876,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
879 struct sdma_channel *sdmac = to_sdma_chan(chan); 876 struct sdma_channel *sdmac = to_sdma_chan(chan);
880 struct sdma_engine *sdma = sdmac->sdma; 877 struct sdma_engine *sdma = sdmac->sdma;
881 int ret, i, count; 878 int ret, i, count;
882 int channel = chan->chan_id; 879 int channel = sdmac->channel;
883 struct scatterlist *sg; 880 struct scatterlist *sg;
884 881
885 if (sdmac->status == DMA_IN_PROGRESS) 882 if (sdmac->status == DMA_IN_PROGRESS)
@@ -924,22 +921,33 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
924 ret = -EINVAL; 921 ret = -EINVAL;
925 goto err_out; 922 goto err_out;
926 } 923 }
927 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) 924
925 switch (sdmac->word_size) {
926 case DMA_SLAVE_BUSWIDTH_4_BYTES:
928 bd->mode.command = 0; 927 bd->mode.command = 0;
929 else 928 if (count & 3 || sg->dma_address & 3)
930 bd->mode.command = sdmac->word_size; 929 return NULL;
930 break;
931 case DMA_SLAVE_BUSWIDTH_2_BYTES:
932 bd->mode.command = 2;
933 if (count & 1 || sg->dma_address & 1)
934 return NULL;
935 break;
936 case DMA_SLAVE_BUSWIDTH_1_BYTE:
937 bd->mode.command = 1;
938 break;
939 default:
940 return NULL;
941 }
931 942
932 param = BD_DONE | BD_EXTD | BD_CONT; 943 param = BD_DONE | BD_EXTD | BD_CONT;
933 944
934 if (sdmac->flags & IMX_DMA_SG_LOOP) { 945 if (i + 1 == sg_len) {
935 param |= BD_INTR; 946 param |= BD_INTR;
936 if (i + 1 == sg_len) 947 param |= BD_LAST;
937 param |= BD_WRAP; 948 param &= ~BD_CONT;
938 } 949 }
939 950
940 if (i + 1 == sg_len)
941 param |= BD_INTR;
942
943 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", 951 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
944 i, count, sg->dma_address, 952 i, count, sg->dma_address,
945 param & BD_WRAP ? "wrap" : "", 953 param & BD_WRAP ? "wrap" : "",
@@ -953,6 +961,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
953 961
954 return &sdmac->desc; 962 return &sdmac->desc;
955err_out: 963err_out:
964 sdmac->status = DMA_ERROR;
956 return NULL; 965 return NULL;
957} 966}
958 967
@@ -963,7 +972,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
963 struct sdma_channel *sdmac = to_sdma_chan(chan); 972 struct sdma_channel *sdmac = to_sdma_chan(chan);
964 struct sdma_engine *sdma = sdmac->sdma; 973 struct sdma_engine *sdma = sdmac->sdma;
965 int num_periods = buf_len / period_len; 974 int num_periods = buf_len / period_len;
966 int channel = chan->chan_id; 975 int channel = sdmac->channel;
967 int ret, i = 0, buf = 0; 976 int ret, i = 0, buf = 0;
968 977
969 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); 978 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
@@ -1066,14 +1075,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
1066{ 1075{
1067 struct sdma_channel *sdmac = to_sdma_chan(chan); 1076 struct sdma_channel *sdmac = to_sdma_chan(chan);
1068 dma_cookie_t last_used; 1077 dma_cookie_t last_used;
1069 enum dma_status ret;
1070 1078
1071 last_used = chan->cookie; 1079 last_used = chan->cookie;
1072 1080
1073 ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
1074 dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); 1081 dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
1075 1082
1076 return ret; 1083 return sdmac->status;
1077} 1084}
1078 1085
1079static void sdma_issue_pending(struct dma_chan *chan) 1086static void sdma_issue_pending(struct dma_chan *chan)
@@ -1135,7 +1142,7 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
1135 /* download the RAM image for SDMA */ 1142 /* download the RAM image for SDMA */
1136 sdma_load_script(sdma, ram_code, 1143 sdma_load_script(sdma, ram_code,
1137 header->ram_code_size, 1144 header->ram_code_size,
1138 sdma->script_addrs->ram_code_start_addr); 1145 addr->ram_code_start_addr);
1139 clk_disable(sdma->clk); 1146 clk_disable(sdma->clk);
1140 1147
1141 sdma_add_scripts(sdma, addr); 1148 sdma_add_scripts(sdma, addr);
@@ -1237,7 +1244,6 @@ static int __init sdma_probe(struct platform_device *pdev)
1237 struct resource *iores; 1244 struct resource *iores;
1238 struct sdma_platform_data *pdata = pdev->dev.platform_data; 1245 struct sdma_platform_data *pdata = pdev->dev.platform_data;
1239 int i; 1246 int i;
1240 dma_cap_mask_t mask;
1241 struct sdma_engine *sdma; 1247 struct sdma_engine *sdma;
1242 1248
1243 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); 1249 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
@@ -1280,6 +1286,9 @@ static int __init sdma_probe(struct platform_device *pdev)
1280 1286
1281 sdma->version = pdata->sdma_version; 1287 sdma->version = pdata->sdma_version;
1282 1288
1289 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1290 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1291
1283 INIT_LIST_HEAD(&sdma->dma_device.channels); 1292 INIT_LIST_HEAD(&sdma->dma_device.channels);
1284 /* Initialize channel parameters */ 1293 /* Initialize channel parameters */
1285 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 1294 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
@@ -1288,15 +1297,17 @@ static int __init sdma_probe(struct platform_device *pdev)
1288 sdmac->sdma = sdma; 1297 sdmac->sdma = sdma;
1289 spin_lock_init(&sdmac->lock); 1298 spin_lock_init(&sdmac->lock);
1290 1299
1291 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1292 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1293
1294 sdmac->chan.device = &sdma->dma_device; 1300 sdmac->chan.device = &sdma->dma_device;
1295 sdmac->chan.chan_id = i;
1296 sdmac->channel = i; 1301 sdmac->channel = i;
1297 1302
1298 /* Add the channel to the DMAC list */ 1303 /*
1299 list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels); 1304 * Add the channel to the DMAC list. Do not add channel 0 though
1305 * because we need it internally in the SDMA driver. This also means
1306 * that channel 0 in dmaengine counting matches sdma channel 1.
1307 */
1308 if (i)
1309 list_add_tail(&sdmac->chan.device_node,
1310 &sdma->dma_device.channels);
1300 } 1311 }
1301 1312
1302 ret = sdma_init(sdma); 1313 ret = sdma_init(sdma);
@@ -1317,6 +1328,8 @@ static int __init sdma_probe(struct platform_device *pdev)
1317 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; 1328 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1318 sdma->dma_device.device_control = sdma_control; 1329 sdma->dma_device.device_control = sdma_control;
1319 sdma->dma_device.device_issue_pending = sdma_issue_pending; 1330 sdma->dma_device.device_issue_pending = sdma_issue_pending;
1331 sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
1332 dma_set_max_seg_size(sdma->dma_device.dev, 65535);
1320 1333
1321 ret = dma_async_device_register(&sdma->dma_device); 1334 ret = dma_async_device_register(&sdma->dma_device);
1322 if (ret) { 1335 if (ret) {
@@ -1324,13 +1337,6 @@ static int __init sdma_probe(struct platform_device *pdev)
1324 goto err_init; 1337 goto err_init;
1325 } 1338 }
1326 1339
1327 /* request channel 0. This is an internal control channel
1328 * to the SDMA engine and not available to clients.
1329 */
1330 dma_cap_zero(mask);
1331 dma_cap_set(DMA_SLAVE, mask);
1332 dma_request_channel(mask, NULL, NULL);
1333
1334 dev_info(sdma->dev, "initialized\n"); 1340 dev_info(sdma->dev, "initialized\n");
1335 1341
1336 return 0; 1342 return 0;
@@ -1348,7 +1354,7 @@ err_clk:
1348err_request_region: 1354err_request_region:
1349err_irq: 1355err_irq:
1350 kfree(sdma); 1356 kfree(sdma);
1351 return 0; 1357 return ret;
1352} 1358}
1353 1359
1354static int __exit sdma_remove(struct platform_device *pdev) 1360static int __exit sdma_remove(struct platform_device *pdev)
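
Note: several hunks above stop relying on chan->chan_id matching the hardware channel (channel 0 is now kept internal to the SDMA engine), but the cookie handling in sdma_assign_cookie()/sdma_tx_submit() is otherwise the standard dmaengine shape: a per-channel, monotonically increasing positive cookie. A sketch of that pattern, assuming the caller holds the channel lock:

    #include <linux/dmaengine.h>

    static dma_cookie_t example_assign_cookie(struct dma_chan *chan,
    					  struct dma_async_tx_descriptor *desc)
    {
    	dma_cookie_t cookie = chan->cookie;

    	if (++cookie < 0)	/* wrapped negative: restart at 1, 0 stays "invalid" */
    		cookie = 1;

    	chan->cookie = cookie;	/* last cookie issued on this channel */
    	desc->cookie = cookie;	/* cookie clients will poll for completion */
    	return cookie;
    }
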
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index cb26ee9773d6..c1a125e7d1df 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1145,29 +1145,6 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
1145 reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); 1145 reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
1146 idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); 1146 idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
1147 1147
1148 /*
1149 * Problem (observed with channel DMAIC_7): after enabling the channel
1150 * and initialising buffers, there comes an interrupt with current still
1151 * pointing at buffer 0, whereas it should use buffer 0 first and only
1152 * generate an interrupt when it is done, then current should already
1153 * point to buffer 1. This spurious interrupt also comes on channel
1154 * DMASDC_0. With DMAIC_7 normally, is we just leave the ISR after the
1155 * first interrupt, there comes the second with current correctly
1156 * pointing to buffer 1 this time. But sometimes this second interrupt
1157 * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling
1158 * the channel seems to prevent the channel from hanging, but it doesn't
1159 * prevent the spurious interrupt. This might also be unsafe. Think
1160 * about the IDMAC controller trying to switch to a buffer, when we
1161 * clear the ready bit, and re-enable it a moment later.
1162 */
1163 reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
1164 idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
1165 idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
1166
1167 reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
1168 idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
1169 idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
1170
1171 spin_unlock_irqrestore(&ipu->lock, flags); 1148 spin_unlock_irqrestore(&ipu->lock, flags);
1172 1149
1173 return 0; 1150 return 0;
@@ -1246,33 +1223,6 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1246 1223
1247 /* Other interrupts do not interfere with this channel */ 1224 /* Other interrupts do not interfere with this channel */
1248 spin_lock(&ichan->lock); 1225 spin_lock(&ichan->lock);
1249 if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
1250 ((curbuf >> chan_id) & 1) == ichan->active_buffer &&
1251 !list_is_last(ichan->queue.next, &ichan->queue))) {
1252 int i = 100;
1253
1254 /* This doesn't help. See comment in ipu_disable_channel() */
1255 while (--i) {
1256 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1257 if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
1258 break;
1259 cpu_relax();
1260 }
1261
1262 if (!i) {
1263 spin_unlock(&ichan->lock);
1264 dev_dbg(dev,
1265 "IRQ on active buffer on channel %x, active "
1266 "%d, ready %x, %x, current %x!\n", chan_id,
1267 ichan->active_buffer, ready0, ready1, curbuf);
1268 return IRQ_NONE;
1269 } else
1270 dev_dbg(dev,
1271 "Buffer deactivated on channel %x, active "
1272 "%d, ready %x, %x, current %x, rest %d!\n", chan_id,
1273 ichan->active_buffer, ready0, ready1, curbuf, i);
1274 }
1275
1276 if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || 1226 if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
1277 (!ichan->active_buffer && (ready0 >> chan_id) & 1) 1227 (!ichan->active_buffer && (ready0 >> chan_id) & 1)
1278 )) { 1228 )) {
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4a5ecc58025d..23e03554f0d3 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -826,8 +826,6 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
826/* Display and decode various NB registers for debug purposes. */ 826/* Display and decode various NB registers for debug purposes. */
827static void amd64_dump_misc_regs(struct amd64_pvt *pvt) 827static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
828{ 828{
829 int ganged;
830
831 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); 829 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
832 830
833 debugf1(" NB two channel DRAM capable: %s\n", 831 debugf1(" NB two channel DRAM capable: %s\n",
@@ -851,28 +849,19 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
851 debugf1(" DramHoleValid: %s\n", 849 debugf1(" DramHoleValid: %s\n",
852 (pvt->dhar & DHAR_VALID) ? "yes" : "no"); 850 (pvt->dhar & DHAR_VALID) ? "yes" : "no");
853 851
852 amd64_debug_display_dimm_sizes(0, pvt);
853
854 /* everything below this point is Fam10h and above */ 854 /* everything below this point is Fam10h and above */
855 if (boot_cpu_data.x86 == 0xf) { 855 if (boot_cpu_data.x86 == 0xf)
856 amd64_debug_display_dimm_sizes(0, pvt);
857 return; 856 return;
858 } 857
858 amd64_debug_display_dimm_sizes(1, pvt);
859 859
860 amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4")); 860 amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
861 861
862 /* Only if NOT ganged does dclr1 have valid info */ 862 /* Only if NOT ganged does dclr1 have valid info */
863 if (!dct_ganging_enabled(pvt)) 863 if (!dct_ganging_enabled(pvt))
864 amd64_dump_dramcfg_low(pvt->dclr1, 1); 864 amd64_dump_dramcfg_low(pvt->dclr1, 1);
865
866 /*
867 * Determine if ganged and then dump memory sizes for first controller,
868 * and if NOT ganged dump info for 2nd controller.
869 */
870 ganged = dct_ganging_enabled(pvt);
871
872 amd64_debug_display_dimm_sizes(0, pvt);
873
874 if (!ganged)
875 amd64_debug_display_dimm_sizes(1, pvt);
876} 865}
877 866
878/* Read in both of DBAM registers */ 867/* Read in both of DBAM registers */
@@ -1644,11 +1633,10 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1644 WARN_ON(ctrl != 0); 1633 WARN_ON(ctrl != 0);
1645 } 1634 }
1646 1635
1647 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", 1636 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1648 ctrl, ctrl ? pvt->dbam1 : pvt->dbam0); 1637 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;
1649 1638
1650 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; 1639 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
1651 dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
1652 1640
1653 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); 1641 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1654 1642
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index e28e41668177..bcb1126e3d00 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -378,10 +378,17 @@ static void __init print_filtered(const char *info)
378 378
379static void __init dmi_dump_ids(void) 379static void __init dmi_dump_ids(void)
380{ 380{
381 const char *board; /* Board Name is optional */
382
381 printk(KERN_DEBUG "DMI: "); 383 printk(KERN_DEBUG "DMI: ");
382 print_filtered(dmi_get_system_info(DMI_BOARD_NAME)); 384 print_filtered(dmi_get_system_info(DMI_SYS_VENDOR));
383 printk(KERN_CONT "/"); 385 printk(KERN_CONT " ");
384 print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME)); 386 print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
387 board = dmi_get_system_info(DMI_BOARD_NAME);
388 if (board) {
389 printk(KERN_CONT "/");
390 print_filtered(board);
391 }
385 printk(KERN_CONT ", BIOS "); 392 printk(KERN_CONT ", BIOS ");
386 print_filtered(dmi_get_system_info(DMI_BIOS_VERSION)); 393 print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
387 printk(KERN_CONT " "); 394 printk(KERN_CONT " ");
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index a261972f603d..b473429eee75 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -60,6 +60,7 @@ struct pca953x_chip {
60 unsigned gpio_start; 60 unsigned gpio_start;
61 uint16_t reg_output; 61 uint16_t reg_output;
62 uint16_t reg_direction; 62 uint16_t reg_direction;
63 struct mutex i2c_lock;
63 64
64#ifdef CONFIG_GPIO_PCA953X_IRQ 65#ifdef CONFIG_GPIO_PCA953X_IRQ
65 struct mutex irq_lock; 66 struct mutex irq_lock;
@@ -119,13 +120,17 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
119 120
120 chip = container_of(gc, struct pca953x_chip, gpio_chip); 121 chip = container_of(gc, struct pca953x_chip, gpio_chip);
121 122
123 mutex_lock(&chip->i2c_lock);
122 reg_val = chip->reg_direction | (1u << off); 124 reg_val = chip->reg_direction | (1u << off);
123 ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val); 125 ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
124 if (ret) 126 if (ret)
125 return ret; 127 goto exit;
126 128
127 chip->reg_direction = reg_val; 129 chip->reg_direction = reg_val;
128 return 0; 130 ret = 0;
131exit:
132 mutex_unlock(&chip->i2c_lock);
133 return ret;
129} 134}
130 135
131static int pca953x_gpio_direction_output(struct gpio_chip *gc, 136static int pca953x_gpio_direction_output(struct gpio_chip *gc,
@@ -137,6 +142,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
137 142
138 chip = container_of(gc, struct pca953x_chip, gpio_chip); 143 chip = container_of(gc, struct pca953x_chip, gpio_chip);
139 144
145 mutex_lock(&chip->i2c_lock);
140 /* set output level */ 146 /* set output level */
141 if (val) 147 if (val)
142 reg_val = chip->reg_output | (1u << off); 148 reg_val = chip->reg_output | (1u << off);
@@ -145,7 +151,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
145 151
146 ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val); 152 ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
147 if (ret) 153 if (ret)
148 return ret; 154 goto exit;
149 155
150 chip->reg_output = reg_val; 156 chip->reg_output = reg_val;
151 157
@@ -153,10 +159,13 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
153 reg_val = chip->reg_direction & ~(1u << off); 159 reg_val = chip->reg_direction & ~(1u << off);
154 ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val); 160 ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
155 if (ret) 161 if (ret)
156 return ret; 162 goto exit;
157 163
158 chip->reg_direction = reg_val; 164 chip->reg_direction = reg_val;
159 return 0; 165 ret = 0;
166exit:
167 mutex_unlock(&chip->i2c_lock);
168 return ret;
160} 169}
161 170
162static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) 171static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
@@ -167,7 +176,9 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
167 176
168 chip = container_of(gc, struct pca953x_chip, gpio_chip); 177 chip = container_of(gc, struct pca953x_chip, gpio_chip);
169 178
179 mutex_lock(&chip->i2c_lock);
170 ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val); 180 ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
181 mutex_unlock(&chip->i2c_lock);
171 if (ret < 0) { 182 if (ret < 0) {
172 /* NOTE: diagnostic already emitted; that's all we should 183 /* NOTE: diagnostic already emitted; that's all we should
173 * do unless gpio_*_value_cansleep() calls become different 184 * do unless gpio_*_value_cansleep() calls become different
@@ -187,6 +198,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
187 198
188 chip = container_of(gc, struct pca953x_chip, gpio_chip); 199 chip = container_of(gc, struct pca953x_chip, gpio_chip);
189 200
201 mutex_lock(&chip->i2c_lock);
190 if (val) 202 if (val)
191 reg_val = chip->reg_output | (1u << off); 203 reg_val = chip->reg_output | (1u << off);
192 else 204 else
@@ -194,9 +206,11 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
194 206
195 ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val); 207 ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
196 if (ret) 208 if (ret)
197 return; 209 goto exit;
198 210
199 chip->reg_output = reg_val; 211 chip->reg_output = reg_val;
212exit:
213 mutex_unlock(&chip->i2c_lock);
200} 214}
201 215
202static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios) 216static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
@@ -517,6 +531,8 @@ static int __devinit pca953x_probe(struct i2c_client *client,
517 531
518 chip->names = pdata->names; 532 chip->names = pdata->names;
519 533
534 mutex_init(&chip->i2c_lock);
535
520 /* initialize cached registers from their original values. 536 /* initialize cached registers from their original values.
521 * we can't share this chip with another i2c master. 537 * we can't share this chip with another i2c master.
522 */ 538 */
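The new i2c_lock closes a race in the expander's cached state: direction and output values are shadowed in reg_direction/reg_output, and every update is a read-modify-write of that shadow followed by an I2C register write, so two concurrent callers could previously interleave and drop each other's bit changes. A condensed sketch of the locked pattern, reusing the driver's own helpers (the wrapper name is hypothetical):

    static int pca953x_update_output(struct pca953x_chip *chip,
                                     unsigned off, int val)
    {
            uint16_t reg_val;
            int ret;

            mutex_lock(&chip->i2c_lock);
            if (val)
                    reg_val = chip->reg_output | (1u << off);
            else
                    reg_val = chip->reg_output & ~(1u << off);

            ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
            if (!ret)
                    chip->reg_output = reg_val;  /* cache only what the chip accepted */
            mutex_unlock(&chip->i2c_lock);

            return ret;
    }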
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2baa6708e44c..654faa803dcb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2674,3 +2674,23 @@ out:
2674 mutex_unlock(&dev->mode_config.mutex); 2674 mutex_unlock(&dev->mode_config.mutex);
2675 return ret; 2675 return ret;
2676} 2676}
2677
2678void drm_mode_config_reset(struct drm_device *dev)
2679{
2680 struct drm_crtc *crtc;
2681 struct drm_encoder *encoder;
2682 struct drm_connector *connector;
2683
2684 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2685 if (crtc->funcs->reset)
2686 crtc->funcs->reset(crtc);
2687
2688 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
2689 if (encoder->funcs->reset)
2690 encoder->funcs->reset(encoder);
2691
2692 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
2693 if (connector->funcs->reset)
2694 connector->funcs->reset(connector);
2695}
2696EXPORT_SYMBOL(drm_mode_config_reset);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 952b3d4fb2a6..92369655dca3 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -343,13 +343,12 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
343 struct drm_encoder *encoder; 343 struct drm_encoder *encoder;
344 bool ret = true; 344 bool ret = true;
345 345
346 adjusted_mode = drm_mode_duplicate(dev, mode);
347
348 crtc->enabled = drm_helper_crtc_in_use(crtc); 346 crtc->enabled = drm_helper_crtc_in_use(crtc);
349
350 if (!crtc->enabled) 347 if (!crtc->enabled)
351 return true; 348 return true;
352 349
350 adjusted_mode = drm_mode_duplicate(dev, mode);
351
353 saved_hwmode = crtc->hwmode; 352 saved_hwmode = crtc->hwmode;
354 saved_mode = crtc->mode; 353 saved_mode = crtc->mode;
355 saved_x = crtc->x; 354 saved_x = crtc->x;
@@ -437,10 +436,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
437 */ 436 */
438 drm_calc_timestamping_constants(crtc); 437 drm_calc_timestamping_constants(crtc);
439 438
440 /* XXX free adjustedmode */
441 drm_mode_destroy(dev, adjusted_mode);
442 /* FIXME: add subpixel order */ 439 /* FIXME: add subpixel order */
443done: 440done:
441 drm_mode_destroy(dev, adjusted_mode);
444 if (!ret) { 442 if (!ret) {
445 crtc->hwmode = saved_hwmode; 443 crtc->hwmode = saved_hwmode;
446 crtc->mode = saved_mode; 444 crtc->mode = saved_mode;
@@ -497,14 +495,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
497 495
498 crtc_funcs = set->crtc->helper_private; 496 crtc_funcs = set->crtc->helper_private;
499 497
498 if (!set->mode)
499 set->fb = NULL;
500
500 if (set->fb) { 501 if (set->fb) {
501 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", 502 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
502 set->crtc->base.id, set->fb->base.id, 503 set->crtc->base.id, set->fb->base.id,
503 (int)set->num_connectors, set->x, set->y); 504 (int)set->num_connectors, set->x, set->y);
504 } else { 505 } else {
505 DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n", 506 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
506 set->crtc->base.id, (int)set->num_connectors, 507 set->mode = NULL;
507 set->x, set->y); 508 set->num_connectors = 0;
508 } 509 }
509 510
510 dev = set->crtc->dev; 511 dev = set->crtc->dev;
@@ -649,8 +650,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
649 mode_changed = true; 650 mode_changed = true;
650 651
651 if (mode_changed) { 652 if (mode_changed) {
652 set->crtc->enabled = (set->mode != NULL); 653 set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
653 if (set->mode != NULL) { 654 if (set->crtc->enabled) {
654 DRM_DEBUG_KMS("attempting to set mode from" 655 DRM_DEBUG_KMS("attempting to set mode from"
655 " userspace\n"); 656 " userspace\n");
656 drm_mode_debug_printmodeline(set->mode); 657 drm_mode_debug_printmodeline(set->mode);
@@ -665,6 +666,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
665 ret = -EINVAL; 666 ret = -EINVAL;
666 goto fail; 667 goto fail;
667 } 668 }
669 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
670 for (i = 0; i < set->num_connectors; i++) {
671 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
672 drm_get_connector_name(set->connectors[i]));
673 set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
674 }
668 } 675 }
669 drm_helper_disable_unused_functions(dev); 676 drm_helper_disable_unused_functions(dev);
670 } else if (fb_changed) { 677 } else if (fb_changed) {
@@ -681,12 +688,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
681 goto fail; 688 goto fail;
682 } 689 }
683 } 690 }
684 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
685 for (i = 0; i < set->num_connectors; i++) {
686 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
687 drm_get_connector_name(set->connectors[i]));
688 set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
689 }
690 691
691 kfree(save_connectors); 692 kfree(save_connectors);
692 kfree(save_encoders); 693 kfree(save_encoders);
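Two easy-to-miss points in the set_mode portion of this hunk: the adjusted mode is duplicated only after the early return for a disabled CRTC, and it is destroyed at the shared done: label, so every failure path that jumps there frees it as well (the old code leaked the duplicate whenever a later step failed). Reduced to a sketch with the hardware programming elided (the failure check is a stand-in):

    static bool set_mode_sketch(struct drm_device *dev, struct drm_crtc *crtc,
                                struct drm_display_mode *mode)
    {
            struct drm_display_mode *adjusted_mode;
            bool ret = true;

            if (!drm_helper_crtc_in_use(crtc))
                    return true;                    /* nothing allocated yet */

            adjusted_mode = drm_mode_duplicate(dev, mode);
            if (!adjusted_mode)
                    return false;

            if (!crtc->helper_private) {            /* stand-in for the real fixup/commit failures */
                    ret = false;
                    goto done;
            }

            /* ... mode_fixup, prepare, mode_set, commit ... */
    done:
            drm_mode_destroy(dev, adjusted_mode);   /* freed on success and failure alike */
            return ret;
    }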
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 3cdbaf379bb5..be9a9c07d152 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -283,17 +283,18 @@ int drm_vma_info(struct seq_file *m, void *data)
283#endif 283#endif
284 284
285 mutex_lock(&dev->struct_mutex); 285 mutex_lock(&dev->struct_mutex);
286 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n", 286 seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
287 atomic_read(&dev->vma_count), 287 atomic_read(&dev->vma_count),
288 high_memory, (u64)virt_to_phys(high_memory)); 288 high_memory, (void *)virt_to_phys(high_memory));
289 289
290 list_for_each_entry(pt, &dev->vmalist, head) { 290 list_for_each_entry(pt, &dev->vmalist, head) {
291 vma = pt->vma; 291 vma = pt->vma;
292 if (!vma) 292 if (!vma)
293 continue; 293 continue;
294 seq_printf(m, 294 seq_printf(m,
295 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000", 295 "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
296 pt->pid, vma->vm_start, vma->vm_end, 296 pt->pid,
297 (void *)vma->vm_start, (void *)vma->vm_end,
297 vma->vm_flags & VM_READ ? 'r' : '-', 298 vma->vm_flags & VM_READ ? 'r' : '-',
298 vma->vm_flags & VM_WRITE ? 'w' : '-', 299 vma->vm_flags & VM_WRITE ? 'w' : '-',
299 vma->vm_flags & VM_EXEC ? 'x' : '-', 300 vma->vm_flags & VM_EXEC ? 'x' : '-',
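The format-string change here is a hardening measure rather than a cleanup: %pK cooperates with the kptr_restrict sysctl, printing the real address only to sufficiently privileged readers and zeroes to everyone else, so these debug files stop leaking kernel pointers. A one-function illustration (the hook name and payload are placeholders):

    /* Prints the real pointer only when kptr_restrict allows it. */
    static int example_show(struct seq_file *m, void *data)
    {
            seq_printf(m, "object lives at %pK\n", data);
            return 0;
    }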
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0054e957203f..3dadfa2a8528 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1250,7 +1250,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
1250 * Drivers should call this routine in their vblank interrupt handlers to 1250 * Drivers should call this routine in their vblank interrupt handlers to
1251 * update the vblank counter and send any signals that may be pending. 1251 * update the vblank counter and send any signals that may be pending.
1252 */ 1252 */
1253void drm_handle_vblank(struct drm_device *dev, int crtc) 1253bool drm_handle_vblank(struct drm_device *dev, int crtc)
1254{ 1254{
1255 u32 vblcount; 1255 u32 vblcount;
1256 s64 diff_ns; 1256 s64 diff_ns;
@@ -1258,7 +1258,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
1258 unsigned long irqflags; 1258 unsigned long irqflags;
1259 1259
1260 if (!dev->num_crtcs) 1260 if (!dev->num_crtcs)
1261 return; 1261 return false;
1262 1262
1263 /* Need timestamp lock to prevent concurrent execution with 1263 /* Need timestamp lock to prevent concurrent execution with
1264 * vblank enable/disable, as this would cause inconsistent 1264 * vblank enable/disable, as this would cause inconsistent
@@ -1269,7 +1269,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
1269 /* Vblank irq handling disabled. Nothing to do. */ 1269 /* Vblank irq handling disabled. Nothing to do. */
1270 if (!dev->vblank_enabled[crtc]) { 1270 if (!dev->vblank_enabled[crtc]) {
1271 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1271 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
1272 return; 1272 return false;
1273 } 1273 }
1274 1274
1275 /* Fetch corresponding timestamp for this vblank interval from 1275 /* Fetch corresponding timestamp for this vblank interval from
@@ -1311,5 +1311,6 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
1311 drm_handle_vblank_events(dev, crtc); 1311 drm_handle_vblank_events(dev, crtc);
1312 1312
1313 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 1313 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
1314 return true;
1314} 1315}
1315EXPORT_SYMBOL(drm_handle_vblank); 1316EXPORT_SYMBOL(drm_handle_vblank);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 66796bb82d3e..0ad533f06af9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,6 +46,9 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
46unsigned int i915_powersave = 1; 46unsigned int i915_powersave = 1;
47module_param_named(powersave, i915_powersave, int, 0600); 47module_param_named(powersave, i915_powersave, int, 0600);
48 48
49unsigned int i915_enable_rc6 = 0;
50module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
51
49unsigned int i915_lvds_downclock = 0; 52unsigned int i915_lvds_downclock = 0;
50module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 53module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
51 54
@@ -354,12 +357,13 @@ static int i915_drm_thaw(struct drm_device *dev)
354 error = i915_gem_init_ringbuffer(dev); 357 error = i915_gem_init_ringbuffer(dev);
355 mutex_unlock(&dev->struct_mutex); 358 mutex_unlock(&dev->struct_mutex);
356 359
360 drm_mode_config_reset(dev);
357 drm_irq_install(dev); 361 drm_irq_install(dev);
358 362
359 /* Resume the modeset for every activated CRTC */ 363 /* Resume the modeset for every activated CRTC */
360 drm_helper_resume_force_mode(dev); 364 drm_helper_resume_force_mode(dev);
361 365
362 if (dev_priv->renderctx && dev_priv->pwrctx) 366 if (IS_IRONLAKE_M(dev))
363 ironlake_enable_rc6(dev); 367 ironlake_enable_rc6(dev);
364 } 368 }
365 369
@@ -542,6 +546,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
542 546
543 mutex_unlock(&dev->struct_mutex); 547 mutex_unlock(&dev->struct_mutex);
544 drm_irq_uninstall(dev); 548 drm_irq_uninstall(dev);
549 drm_mode_config_reset(dev);
545 drm_irq_install(dev); 550 drm_irq_install(dev);
546 mutex_lock(&dev->struct_mutex); 551 mutex_lock(&dev->struct_mutex);
547 } 552 }
@@ -566,6 +571,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
566static int __devinit 571static int __devinit
567i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 572i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
568{ 573{
574 /* Only bind to function 0 of the device. Early generations
575 * used function 1 as a placeholder for multi-head. This causes
576 * us confusion instead, especially on the systems where both
577 * functions have the same PCI-ID!
578 */
579 if (PCI_FUNC(pdev->devfn))
580 return -ENODEV;
581
569 return drm_get_pci_dev(pdev, ent, &driver); 582 return drm_get_pci_dev(pdev, ent, &driver);
570} 583}
571 584
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a0149c619cdd..65dfe81d0035 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -958,6 +958,7 @@ extern unsigned int i915_fbpercrtc;
958extern unsigned int i915_powersave; 958extern unsigned int i915_powersave;
959extern unsigned int i915_lvds_downclock; 959extern unsigned int i915_lvds_downclock;
960extern unsigned int i915_panel_use_ssc; 960extern unsigned int i915_panel_use_ssc;
961extern unsigned int i915_enable_rc6;
961 962
962extern int i915_suspend(struct drm_device *dev, pm_message_t state); 963extern int i915_suspend(struct drm_device *dev, pm_message_t state);
963extern int i915_resume(struct drm_device *dev); 964extern int i915_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 062f353497e6..97f946dcc1aa 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1196,18 +1196,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1196 intel_finish_page_flip_plane(dev, 1); 1196 intel_finish_page_flip_plane(dev, 1);
1197 } 1197 }
1198 1198
1199 if (pipea_stats & vblank_status) { 1199 if (pipea_stats & vblank_status &&
1200 drm_handle_vblank(dev, 0)) {
1200 vblank++; 1201 vblank++;
1201 drm_handle_vblank(dev, 0);
1202 if (!dev_priv->flip_pending_is_done) { 1202 if (!dev_priv->flip_pending_is_done) {
1203 i915_pageflip_stall_check(dev, 0); 1203 i915_pageflip_stall_check(dev, 0);
1204 intel_finish_page_flip(dev, 0); 1204 intel_finish_page_flip(dev, 0);
1205 } 1205 }
1206 } 1206 }
1207 1207
1208 if (pipeb_stats & vblank_status) { 1208 if (pipeb_stats & vblank_status &&
1209 drm_handle_vblank(dev, 1)) {
1209 vblank++; 1210 vblank++;
1210 drm_handle_vblank(dev, 1);
1211 if (!dev_priv->flip_pending_is_done) { 1211 if (!dev_priv->flip_pending_is_done) {
1212 i915_pageflip_stall_check(dev, 1); 1212 i915_pageflip_stall_check(dev, 1);
1213 intel_finish_page_flip(dev, 1); 1213 intel_finish_page_flip(dev, 1);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5cfc68940f17..729d4233b763 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -174,7 +174,9 @@
174 * address/value pairs. Don't overdo it, though, x <= 2^4 must hold! 174
175 */ 175 */
176#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 176#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
177#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ 177#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
178#define MI_INVALIDATE_TLB (1<<18)
179#define MI_INVALIDATE_BSD (1<<7)
178#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) 180#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
179#define MI_BATCH_NON_SECURE (1) 181#define MI_BATCH_NON_SECURE (1)
180#define MI_BATCH_NON_SECURE_I965 (1<<8) 182#define MI_BATCH_NON_SECURE_I965 (1<<8)
@@ -1551,17 +1553,7 @@
1551 1553
1552/* Backlight control */ 1554/* Backlight control */
1553#define BLC_PWM_CTL 0x61254 1555#define BLC_PWM_CTL 0x61254
1554#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
1555#define BLC_PWM_CTL2 0x61250 /* 965+ only */ 1556#define BLC_PWM_CTL2 0x61250 /* 965+ only */
1556#define BLM_COMBINATION_MODE (1 << 30)
1557/*
1558 * This is the most significant 15 bits of the number of backlight cycles in a
1559 * complete cycle of the modulated backlight control.
1560 *
1561 * The actual value is this field multiplied by two.
1562 */
1563#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
1564#define BLM_LEGACY_MODE (1 << 16)
1565/* 1557/*
1566 * This is the number of cycles out of the backlight modulation cycle for which 1558 * This is the number of cycles out of the backlight modulation cycle for which
1567 * the backlight is on. 1559 * the backlight is on.
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 17035b87ee46..8a77ff4a7237 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -535,6 +535,15 @@ static int intel_crt_set_property(struct drm_connector *connector,
535 return 0; 535 return 0;
536} 536}
537 537
538static void intel_crt_reset(struct drm_connector *connector)
539{
540 struct drm_device *dev = connector->dev;
541 struct intel_crt *crt = intel_attached_crt(connector);
542
543 if (HAS_PCH_SPLIT(dev))
544 crt->force_hotplug_required = 1;
545}
546
538/* 547/*
539 * Routines for controlling stuff on the analog port 548 * Routines for controlling stuff on the analog port
540 */ 549 */
@@ -548,6 +557,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
548}; 557};
549 558
550static const struct drm_connector_funcs intel_crt_connector_funcs = { 559static const struct drm_connector_funcs intel_crt_connector_funcs = {
560 .reset = intel_crt_reset,
551 .dpms = drm_helper_connector_dpms, 561 .dpms = drm_helper_connector_dpms,
552 .detect = intel_crt_detect, 562 .detect = intel_crt_detect,
553 .fill_modes = drm_helper_probe_single_connector_modes, 563 .fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d7f237deaaf0..3b006536b3d2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5551,6 +5551,16 @@ cleanup_work:
5551 return ret; 5551 return ret;
5552} 5552}
5553 5553
5554static void intel_crtc_reset(struct drm_crtc *crtc)
5555{
5556 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5557
5558 /* Reset flags back to the 'unknown' status so that they
5559 * will be correctly set on the initial modeset.
5560 */
5561 intel_crtc->dpms_mode = -1;
5562}
5563
5554static struct drm_crtc_helper_funcs intel_helper_funcs = { 5564static struct drm_crtc_helper_funcs intel_helper_funcs = {
5555 .dpms = intel_crtc_dpms, 5565 .dpms = intel_crtc_dpms,
5556 .mode_fixup = intel_crtc_mode_fixup, 5566 .mode_fixup = intel_crtc_mode_fixup,
@@ -5562,6 +5572,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
5562}; 5572};
5563 5573
5564static const struct drm_crtc_funcs intel_crtc_funcs = { 5574static const struct drm_crtc_funcs intel_crtc_funcs = {
5575 .reset = intel_crtc_reset,
5565 .cursor_set = intel_crtc_cursor_set, 5576 .cursor_set = intel_crtc_cursor_set,
5566 .cursor_move = intel_crtc_cursor_move, 5577 .cursor_move = intel_crtc_cursor_move,
5567 .gamma_set = intel_crtc_gamma_set, 5578 .gamma_set = intel_crtc_gamma_set,
@@ -5652,8 +5663,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
5652 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 5663 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
5653 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 5664 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
5654 5665
5655 intel_crtc->cursor_addr = 0; 5666 intel_crtc_reset(&intel_crtc->base);
5656 intel_crtc->dpms_mode = -1;
5657 intel_crtc->active = true; /* force the pipe off on setup_init_config */ 5667 intel_crtc->active = true; /* force the pipe off on setup_init_config */
5658 5668
5659 if (HAS_PCH_SPLIT(dev)) { 5669 if (HAS_PCH_SPLIT(dev)) {
@@ -6452,52 +6462,60 @@ void intel_enable_clock_gating(struct drm_device *dev)
6452 } 6462 }
6453} 6463}
6454 6464
6455void intel_disable_clock_gating(struct drm_device *dev) 6465static void ironlake_teardown_rc6(struct drm_device *dev)
6456{ 6466{
6457 struct drm_i915_private *dev_priv = dev->dev_private; 6467 struct drm_i915_private *dev_priv = dev->dev_private;
6458 6468
6459 if (dev_priv->renderctx) { 6469 if (dev_priv->renderctx) {
6460 struct drm_i915_gem_object *obj = dev_priv->renderctx; 6470 i915_gem_object_unpin(dev_priv->renderctx);
6461 6471 drm_gem_object_unreference(&dev_priv->renderctx->base);
6462 I915_WRITE(CCID, 0);
6463 POSTING_READ(CCID);
6464
6465 i915_gem_object_unpin(obj);
6466 drm_gem_object_unreference(&obj->base);
6467 dev_priv->renderctx = NULL; 6472 dev_priv->renderctx = NULL;
6468 } 6473 }
6469 6474
6470 if (dev_priv->pwrctx) { 6475 if (dev_priv->pwrctx) {
6471 struct drm_i915_gem_object *obj = dev_priv->pwrctx; 6476 i915_gem_object_unpin(dev_priv->pwrctx);
6477 drm_gem_object_unreference(&dev_priv->pwrctx->base);
6478 dev_priv->pwrctx = NULL;
6479 }
6480}
6481
6482static void ironlake_disable_rc6(struct drm_device *dev)
6483{
6484 struct drm_i915_private *dev_priv = dev->dev_private;
6485
6486 if (I915_READ(PWRCTXA)) {
6487 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
6488 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
6489 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
6490 50);
6472 6491
6473 I915_WRITE(PWRCTXA, 0); 6492 I915_WRITE(PWRCTXA, 0);
6474 POSTING_READ(PWRCTXA); 6493 POSTING_READ(PWRCTXA);
6475 6494
6476 i915_gem_object_unpin(obj); 6495 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
6477 drm_gem_object_unreference(&obj->base); 6496 POSTING_READ(RSTDBYCTL);
6478 dev_priv->pwrctx = NULL;
6479 } 6497 }
6498
6499 ironlake_disable_rc6(dev);
6480} 6500}
6481 6501
6482static void ironlake_disable_rc6(struct drm_device *dev) 6502static int ironlake_setup_rc6(struct drm_device *dev)
6483{ 6503{
6484 struct drm_i915_private *dev_priv = dev->dev_private; 6504 struct drm_i915_private *dev_priv = dev->dev_private;
6485 6505
6486 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ 6506 if (dev_priv->renderctx == NULL)
6487 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); 6507 dev_priv->renderctx = intel_alloc_context_page(dev);
6488 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), 6508 if (!dev_priv->renderctx)
6489 10); 6509 return -ENOMEM;
6490 POSTING_READ(CCID); 6510
6491 I915_WRITE(PWRCTXA, 0); 6511 if (dev_priv->pwrctx == NULL)
6492 POSTING_READ(PWRCTXA); 6512 dev_priv->pwrctx = intel_alloc_context_page(dev);
6493 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 6513 if (!dev_priv->pwrctx) {
6494 POSTING_READ(RSTDBYCTL); 6514 ironlake_teardown_rc6(dev);
6495 i915_gem_object_unpin(dev_priv->renderctx); 6515 return -ENOMEM;
6496 drm_gem_object_unreference(&dev_priv->renderctx->base); 6516 }
6497 dev_priv->renderctx = NULL; 6517
6498 i915_gem_object_unpin(dev_priv->pwrctx); 6518 return 0;
6499 drm_gem_object_unreference(&dev_priv->pwrctx->base);
6500 dev_priv->pwrctx = NULL;
6501} 6519}
6502 6520
6503void ironlake_enable_rc6(struct drm_device *dev) 6521void ironlake_enable_rc6(struct drm_device *dev)
@@ -6505,15 +6523,26 @@ void ironlake_enable_rc6(struct drm_device *dev)
6505 struct drm_i915_private *dev_priv = dev->dev_private; 6523 struct drm_i915_private *dev_priv = dev->dev_private;
6506 int ret; 6524 int ret;
6507 6525
6526 /* rc6 disabled by default due to repeated reports of hanging during
6527 * boot and resume.
6528 */
6529 if (!i915_enable_rc6)
6530 return;
6531
6532 ret = ironlake_setup_rc6(dev);
6533 if (ret)
6534 return;
6535
6508 /* 6536 /*
6509 * GPU can automatically power down the render unit if given a page 6537 * GPU can automatically power down the render unit if given a page
6510 * to save state. 6538 * to save state.
6511 */ 6539 */
6512 ret = BEGIN_LP_RING(6); 6540 ret = BEGIN_LP_RING(6);
6513 if (ret) { 6541 if (ret) {
6514 ironlake_disable_rc6(dev); 6542 ironlake_teardown_rc6(dev);
6515 return; 6543 return;
6516 } 6544 }
6545
6517 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); 6546 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
6518 OUT_RING(MI_SET_CONTEXT); 6547 OUT_RING(MI_SET_CONTEXT);
6519 OUT_RING(dev_priv->renderctx->gtt_offset | 6548 OUT_RING(dev_priv->renderctx->gtt_offset |
@@ -6530,6 +6559,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
6530 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 6559 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
6531} 6560}
6532 6561
6562
6533/* Set up chip specific display functions */ 6563/* Set up chip specific display functions */
6534static void intel_init_display(struct drm_device *dev) 6564static void intel_init_display(struct drm_device *dev)
6535{ 6565{
@@ -6772,21 +6802,9 @@ void intel_modeset_init(struct drm_device *dev)
6772 if (IS_GEN6(dev)) 6802 if (IS_GEN6(dev))
6773 gen6_enable_rps(dev_priv); 6803 gen6_enable_rps(dev_priv);
6774 6804
6775 if (IS_IRONLAKE_M(dev)) { 6805 if (IS_IRONLAKE_M(dev))
6776 dev_priv->renderctx = intel_alloc_context_page(dev);
6777 if (!dev_priv->renderctx)
6778 goto skip_rc6;
6779 dev_priv->pwrctx = intel_alloc_context_page(dev);
6780 if (!dev_priv->pwrctx) {
6781 i915_gem_object_unpin(dev_priv->renderctx);
6782 drm_gem_object_unreference(&dev_priv->renderctx->base);
6783 dev_priv->renderctx = NULL;
6784 goto skip_rc6;
6785 }
6786 ironlake_enable_rc6(dev); 6806 ironlake_enable_rc6(dev);
6787 }
6788 6807
6789skip_rc6:
6790 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 6808 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
6791 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 6809 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
6792 (unsigned long)dev); 6810 (unsigned long)dev);
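Taken together with the i915_drv.c and i915_drv.h hunks above, RC6 is now opt-in: ironlake_enable_rc6(), which is called from modeset init and from the thaw path, bails out unless the new module parameter is set. Given the module_param_named(i915_enable_rc6, ...) declaration, re-enabling it should amount to loading the driver with i915_enable_rc6=1 (or i915.i915_enable_rc6=1 on the kernel command line when i915 is built in).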
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1f4242b682c8..51cb4e36997f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1639,6 +1639,24 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1639 return 0; 1639 return 0;
1640} 1640}
1641 1641
1642static bool
1643intel_dp_detect_audio(struct drm_connector *connector)
1644{
1645 struct intel_dp *intel_dp = intel_attached_dp(connector);
1646 struct edid *edid;
1647 bool has_audio = false;
1648
1649 edid = drm_get_edid(connector, &intel_dp->adapter);
1650 if (edid) {
1651 has_audio = drm_detect_monitor_audio(edid);
1652
1653 connector->display_info.raw_edid = NULL;
1654 kfree(edid);
1655 }
1656
1657 return has_audio;
1658}
1659
1642static int 1660static int
1643intel_dp_set_property(struct drm_connector *connector, 1661intel_dp_set_property(struct drm_connector *connector,
1644 struct drm_property *property, 1662 struct drm_property *property,
@@ -1652,17 +1670,23 @@ intel_dp_set_property(struct drm_connector *connector,
1652 return ret; 1670 return ret;
1653 1671
1654 if (property == intel_dp->force_audio_property) { 1672 if (property == intel_dp->force_audio_property) {
1655 if (val == intel_dp->force_audio) 1673 int i = val;
1674 bool has_audio;
1675
1676 if (i == intel_dp->force_audio)
1656 return 0; 1677 return 0;
1657 1678
1658 intel_dp->force_audio = val; 1679 intel_dp->force_audio = i;
1659 1680
1660 if (val > 0 && intel_dp->has_audio) 1681 if (i == 0)
1661 return 0; 1682 has_audio = intel_dp_detect_audio(connector);
1662 if (val < 0 && !intel_dp->has_audio) 1683 else
1684 has_audio = i > 0;
1685
1686 if (has_audio == intel_dp->has_audio)
1663 return 0; 1687 return 0;
1664 1688
1665 intel_dp->has_audio = val > 0; 1689 intel_dp->has_audio = has_audio;
1666 goto done; 1690 goto done;
1667 } 1691 }
1668 1692
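This tri-state handling is repeated almost verbatim for HDMI and SDVO below: a force_audio value of 0 now means "ask the sink's EDID", while the sign of a non-zero value forces audio off or on regardless of the display. Distilled into a standalone helper (the function name is invented; the EDID probe is abstracted to a bool):

    /* force_audio: < 0 force off, 0 auto-detect from EDID, > 0 force on. */
    static bool resolve_forced_audio(int force_audio, bool edid_reports_audio)
    {
            if (force_audio == 0)
                    return edid_reports_audio;
            return force_audio > 0;
    }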
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 74db2557d644..2c431049963c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -298,7 +298,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
298extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 298extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
299 u16 *blue, int regno); 299 u16 *blue, int regno);
300extern void intel_enable_clock_gating(struct drm_device *dev); 300extern void intel_enable_clock_gating(struct drm_device *dev);
301extern void intel_disable_clock_gating(struct drm_device *dev);
302extern void ironlake_enable_drps(struct drm_device *dev); 301extern void ironlake_enable_drps(struct drm_device *dev);
303extern void ironlake_disable_drps(struct drm_device *dev); 302extern void ironlake_disable_drps(struct drm_device *dev);
304extern void gen6_enable_rps(struct drm_i915_private *dev_priv); 303extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0d0273e7b029..c635c9e357b9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -251,6 +251,27 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
251 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); 251 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
252} 252}
253 253
254static bool
255intel_hdmi_detect_audio(struct drm_connector *connector)
256{
257 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
258 struct drm_i915_private *dev_priv = connector->dev->dev_private;
259 struct edid *edid;
260 bool has_audio = false;
261
262 edid = drm_get_edid(connector,
263 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
264 if (edid) {
265 if (edid->input & DRM_EDID_INPUT_DIGITAL)
266 has_audio = drm_detect_monitor_audio(edid);
267
268 connector->display_info.raw_edid = NULL;
269 kfree(edid);
270 }
271
272 return has_audio;
273}
274
254static int 275static int
255intel_hdmi_set_property(struct drm_connector *connector, 276intel_hdmi_set_property(struct drm_connector *connector,
256 struct drm_property *property, 277 struct drm_property *property,
@@ -264,17 +285,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
264 return ret; 285 return ret;
265 286
266 if (property == intel_hdmi->force_audio_property) { 287 if (property == intel_hdmi->force_audio_property) {
267 if (val == intel_hdmi->force_audio) 288 int i = val;
289 bool has_audio;
290
291 if (i == intel_hdmi->force_audio)
268 return 0; 292 return 0;
269 293
270 intel_hdmi->force_audio = val; 294 intel_hdmi->force_audio = i;
271 295
272 if (val > 0 && intel_hdmi->has_audio) 296 if (i == 0)
273 return 0; 297 has_audio = intel_hdmi_detect_audio(connector);
274 if (val < 0 && !intel_hdmi->has_audio) 298 else
299 has_audio = i > 0;
300
301 if (has_audio == intel_hdmi->has_audio)
275 return 0; 302 return 0;
276 303
277 intel_hdmi->has_audio = val > 0; 304 intel_hdmi->has_audio = has_audio;
278 goto done; 305 goto done;
279 } 306 }
280 307
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ace8d5d30dd2..bcdba7bd5cfa 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -261,12 +261,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
261 return true; 261 return true;
262 } 262 }
263 263
264 /* Make sure pre-965s set dither correctly */
265 if (INTEL_INFO(dev)->gen < 4) {
266 if (dev_priv->lvds_dither)
267 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
268 }
269
270 /* Native modes don't need fitting */ 264 /* Native modes don't need fitting */
271 if (adjusted_mode->hdisplay == mode->hdisplay && 265 if (adjusted_mode->hdisplay == mode->hdisplay &&
272 adjusted_mode->vdisplay == mode->vdisplay) 266 adjusted_mode->vdisplay == mode->vdisplay)
@@ -374,10 +368,16 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
374 } 368 }
375 369
376out: 370out:
371 /* If not enabling scaling, be consistent and always use 0. */
377 if ((pfit_control & PFIT_ENABLE) == 0) { 372 if ((pfit_control & PFIT_ENABLE) == 0) {
378 pfit_control = 0; 373 pfit_control = 0;
379 pfit_pgm_ratios = 0; 374 pfit_pgm_ratios = 0;
380 } 375 }
376
377 /* Make sure pre-965 set dither correctly */
378 if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
379 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
380
381 if (pfit_control != intel_lvds->pfit_control || 381 if (pfit_control != intel_lvds->pfit_control ||
382 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { 382 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
383 intel_lvds->pfit_control = pfit_control; 383 intel_lvds->pfit_control = pfit_control;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c65992df458d..d860abeda70f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -30,8 +30,6 @@
30 30
31#include "intel_drv.h" 31#include "intel_drv.h"
32 32
33#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
34
35void 33void
36intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 34intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
37 struct drm_display_mode *adjusted_mode) 35 struct drm_display_mode *adjusted_mode)
@@ -112,19 +110,6 @@ done:
112 dev_priv->pch_pf_size = (width << 16) | height; 110 dev_priv->pch_pf_size = (width << 16) | height;
113} 111}
114 112
115static int is_backlight_combination_mode(struct drm_device *dev)
116{
117 struct drm_i915_private *dev_priv = dev->dev_private;
118
119 if (INTEL_INFO(dev)->gen >= 4)
120 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
121
122 if (IS_GEN2(dev))
123 return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
124
125 return 0;
126}
127
128static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) 113static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
129{ 114{
130 u32 val; 115 u32 val;
@@ -181,9 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
181 if (INTEL_INFO(dev)->gen < 4) 166 if (INTEL_INFO(dev)->gen < 4)
182 max &= ~1; 167 max &= ~1;
183 } 168 }
184
185 if (is_backlight_combination_mode(dev))
186 max *= 0xff;
187 } 169 }
188 170
189 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); 171 DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
@@ -201,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
201 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 183 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
202 if (IS_PINEVIEW(dev)) 184 if (IS_PINEVIEW(dev))
203 val >>= 1; 185 val >>= 1;
204
205 if (is_backlight_combination_mode(dev)){
206 u8 lbpc;
207
208 val &= ~1;
209 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
210 val *= lbpc;
211 val >>= 1;
212 }
213 } 186 }
214 187
215 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); 188 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
@@ -232,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
232 205
233 if (HAS_PCH_SPLIT(dev)) 206 if (HAS_PCH_SPLIT(dev))
234 return intel_pch_panel_set_backlight(dev, level); 207 return intel_pch_panel_set_backlight(dev, level);
235
236 if (is_backlight_combination_mode(dev)){
237 u32 max = intel_panel_get_max_backlight(dev);
238 u8 lpbc;
239
240 lpbc = level * 0xfe / max + 1;
241 level /= lpbc;
242 pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
243 }
244
245 tmp = I915_READ(BLC_PWM_CTL); 208 tmp = I915_READ(BLC_PWM_CTL);
246 if (IS_PINEVIEW(dev)) { 209 if (IS_PINEVIEW(dev)) {
247 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); 210 tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6218fa97aa1e..445f27efe677 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1059,22 +1059,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1059} 1059}
1060 1060
1061static int gen6_ring_flush(struct intel_ring_buffer *ring, 1061static int gen6_ring_flush(struct intel_ring_buffer *ring,
1062 u32 invalidate_domains, 1062 u32 invalidate, u32 flush)
1063 u32 flush_domains)
1064{ 1063{
1064 uint32_t cmd;
1065 int ret; 1065 int ret;
1066 1066
1067 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1067 if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
1068 return 0; 1068 return 0;
1069 1069
1070 ret = intel_ring_begin(ring, 4); 1070 ret = intel_ring_begin(ring, 4);
1071 if (ret) 1071 if (ret)
1072 return ret; 1072 return ret;
1073 1073
1074 intel_ring_emit(ring, MI_FLUSH_DW); 1074 cmd = MI_FLUSH_DW;
1075 intel_ring_emit(ring, 0); 1075 if (invalidate & I915_GEM_GPU_DOMAINS)
1076 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1077 intel_ring_emit(ring, cmd);
1076 intel_ring_emit(ring, 0); 1078 intel_ring_emit(ring, 0);
1077 intel_ring_emit(ring, 0); 1079 intel_ring_emit(ring, 0);
1080 intel_ring_emit(ring, MI_NOOP);
1078 intel_ring_advance(ring); 1081 intel_ring_advance(ring);
1079 return 0; 1082 return 0;
1080} 1083}
@@ -1230,22 +1233,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
1230} 1233}
1231 1234
1232static int blt_ring_flush(struct intel_ring_buffer *ring, 1235static int blt_ring_flush(struct intel_ring_buffer *ring,
1233 u32 invalidate_domains, 1236 u32 invalidate, u32 flush)
1234 u32 flush_domains)
1235{ 1237{
1238 uint32_t cmd;
1236 int ret; 1239 int ret;
1237 1240
1238 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1241 if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
1239 return 0; 1242 return 0;
1240 1243
1241 ret = blt_ring_begin(ring, 4); 1244 ret = blt_ring_begin(ring, 4);
1242 if (ret) 1245 if (ret)
1243 return ret; 1246 return ret;
1244 1247
1245 intel_ring_emit(ring, MI_FLUSH_DW); 1248 cmd = MI_FLUSH_DW;
1246 intel_ring_emit(ring, 0); 1249 if (invalidate & I915_GEM_DOMAIN_RENDER)
1250 cmd |= MI_INVALIDATE_TLB;
1251 intel_ring_emit(ring, cmd);
1247 intel_ring_emit(ring, 0); 1252 intel_ring_emit(ring, 0);
1248 intel_ring_emit(ring, 0); 1253 intel_ring_emit(ring, 0);
1254 intel_ring_emit(ring, MI_NOOP);
1249 intel_ring_advance(ring); 1255 intel_ring_advance(ring);
1250 return 0; 1256 return 0;
1251} 1257}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 45cd37652a37..7c50cdce84f0 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -46,6 +46,7 @@
46 SDVO_TV_MASK) 46 SDVO_TV_MASK)
47 47
48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK) 48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
49#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
49#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) 50#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
50#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) 51#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
51 52
@@ -473,20 +474,6 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
473 return false; 474 return false;
474 } 475 }
475 476
476 i = 3;
477 while (status == SDVO_CMD_STATUS_PENDING && i--) {
478 if (!intel_sdvo_read_byte(intel_sdvo,
479 SDVO_I2C_CMD_STATUS,
480 &status))
481 return false;
482 }
483 if (status != SDVO_CMD_STATUS_SUCCESS) {
484 DRM_DEBUG_KMS("command returns response %s [%d]\n",
485 status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
486 status);
487 return false;
488 }
489
490 return true; 477 return true;
491} 478}
492 479
@@ -497,6 +484,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
497 u8 status; 484 u8 status;
498 int i; 485 int i;
499 486
487 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
488
500 /* 489 /*
501 * The documentation states that all commands will be 490 * The documentation states that all commands will be
502 * processed within 15µs, and that we need only poll 491 * processed within 15µs, and that we need only poll
@@ -505,14 +494,19 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
505 * 494 *
506 * Check 5 times in case the hardware failed to read the docs. 495 * Check 5 times in case the hardware failed to read the docs.
507 */ 496 */
508 do { 497 if (!intel_sdvo_read_byte(intel_sdvo,
498 SDVO_I2C_CMD_STATUS,
499 &status))
500 goto log_fail;
501
502 while (status == SDVO_CMD_STATUS_PENDING && retry--) {
503 udelay(15);
509 if (!intel_sdvo_read_byte(intel_sdvo, 504 if (!intel_sdvo_read_byte(intel_sdvo,
510 SDVO_I2C_CMD_STATUS, 505 SDVO_I2C_CMD_STATUS,
511 &status)) 506 &status))
512 return false; 507 goto log_fail;
513 } while (status == SDVO_CMD_STATUS_PENDING && --retry); 508 }
514 509
515 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
516 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) 510 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
517 DRM_LOG_KMS("(%s)", cmd_status_names[status]); 511 DRM_LOG_KMS("(%s)", cmd_status_names[status]);
518 else 512 else
@@ -533,7 +527,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
533 return true; 527 return true;
534 528
535log_fail: 529log_fail:
536 DRM_LOG_KMS("\n"); 530 DRM_LOG_KMS("... failed\n");
537 return false; 531 return false;
538} 532}
539 533
@@ -550,6 +544,7 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
550static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, 544static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
551 u8 ddc_bus) 545 u8 ddc_bus)
552{ 546{
547 /* This must be the immediately preceding write before the i2c xfer */
553 return intel_sdvo_write_cmd(intel_sdvo, 548 return intel_sdvo_write_cmd(intel_sdvo,
554 SDVO_CMD_SET_CONTROL_BUS_SWITCH, 549 SDVO_CMD_SET_CONTROL_BUS_SWITCH,
555 &ddc_bus, 1); 550 &ddc_bus, 1);
@@ -557,7 +552,10 @@ static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
557 552
558static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) 553static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
559{ 554{
560 return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len); 555 if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
556 return false;
557
558 return intel_sdvo_read_response(intel_sdvo, NULL, 0);
561} 559}
562 560
563static bool 561static bool
@@ -859,18 +857,21 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
859 857
860 intel_dip_infoframe_csum(&avi_if); 858 intel_dip_infoframe_csum(&avi_if);
861 859
862 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, 860 if (!intel_sdvo_set_value(intel_sdvo,
861 SDVO_CMD_SET_HBUF_INDEX,
863 set_buf_index, 2)) 862 set_buf_index, 2))
864 return false; 863 return false;
865 864
866 for (i = 0; i < sizeof(avi_if); i += 8) { 865 for (i = 0; i < sizeof(avi_if); i += 8) {
867 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, 866 if (!intel_sdvo_set_value(intel_sdvo,
867 SDVO_CMD_SET_HBUF_DATA,
868 data, 8)) 868 data, 8))
869 return false; 869 return false;
870 data++; 870 data++;
871 } 871 }
872 872
873 return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, 873 return intel_sdvo_set_value(intel_sdvo,
874 SDVO_CMD_SET_HBUF_TXRATE,
874 &tx_rate, 1); 875 &tx_rate, 1);
875} 876}
876 877
@@ -1359,7 +1360,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1359 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); 1360 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
1360 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); 1361 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
1361 } 1362 }
1362 } 1363 } else
1364 status = connector_status_disconnected;
1363 connector->display_info.raw_edid = NULL; 1365 connector->display_info.raw_edid = NULL;
1364 kfree(edid); 1366 kfree(edid);
1365 } 1367 }
@@ -1407,10 +1409,25 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1407 1409
1408 if ((intel_sdvo_connector->output_flag & response) == 0) 1410 if ((intel_sdvo_connector->output_flag & response) == 0)
1409 ret = connector_status_disconnected; 1411 ret = connector_status_disconnected;
1410 else if (response & SDVO_TMDS_MASK) 1412 else if (IS_TMDS(intel_sdvo_connector))
1411 ret = intel_sdvo_hdmi_sink_detect(connector); 1413 ret = intel_sdvo_hdmi_sink_detect(connector);
1412 else 1414 else {
1413 ret = connector_status_connected; 1415 struct edid *edid;
1416
1417 /* if we have an edid check it matches the connection */
1418 edid = intel_sdvo_get_edid(connector);
1419 if (edid == NULL)
1420 edid = intel_sdvo_get_analog_edid(connector);
1421 if (edid != NULL) {
1422 if (edid->input & DRM_EDID_INPUT_DIGITAL)
1423 ret = connector_status_disconnected;
1424 else
1425 ret = connector_status_connected;
1426 connector->display_info.raw_edid = NULL;
1427 kfree(edid);
1428 } else
1429 ret = connector_status_connected;
1430 }
1414 1431
1415 /* May update encoder flag for like clock for SDVO TV, etc.*/ 1432 /* May update encoder flag for like clock for SDVO TV, etc.*/
1416 if (ret == connector_status_connected) { 1433 if (ret == connector_status_connected) {
@@ -1446,10 +1463,15 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1446 edid = intel_sdvo_get_analog_edid(connector); 1463 edid = intel_sdvo_get_analog_edid(connector);
1447 1464
1448 if (edid != NULL) { 1465 if (edid != NULL) {
1449 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 1466 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1467 bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
1468 bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
1469
1470 if (connector_is_digital == monitor_is_digital) {
1450 drm_mode_connector_update_edid_property(connector, edid); 1471 drm_mode_connector_update_edid_property(connector, edid);
1451 drm_add_edid_modes(connector, edid); 1472 drm_add_edid_modes(connector, edid);
1452 } 1473 }
1474
1453 connector->display_info.raw_edid = NULL; 1475 connector->display_info.raw_edid = NULL;
1454 kfree(edid); 1476 kfree(edid);
1455 } 1477 }
@@ -1668,6 +1690,22 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1668 kfree(connector); 1690 kfree(connector);
1669} 1691}
1670 1692
1693static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
1694{
1695 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1696 struct edid *edid;
1697 bool has_audio = false;
1698
1699 if (!intel_sdvo->is_hdmi)
1700 return false;
1701
1702 edid = intel_sdvo_get_edid(connector);
1703 if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
1704 has_audio = drm_detect_monitor_audio(edid);
1705
1706 return has_audio;
1707}
1708
1671static int 1709static int
1672intel_sdvo_set_property(struct drm_connector *connector, 1710intel_sdvo_set_property(struct drm_connector *connector,
1673 struct drm_property *property, 1711 struct drm_property *property,
@@ -1684,17 +1722,23 @@ intel_sdvo_set_property(struct drm_connector *connector,
1684 return ret; 1722 return ret;
1685 1723
1686 if (property == intel_sdvo_connector->force_audio_property) { 1724 if (property == intel_sdvo_connector->force_audio_property) {
1687 if (val == intel_sdvo_connector->force_audio) 1725 int i = val;
1726 bool has_audio;
1727
1728 if (i == intel_sdvo_connector->force_audio)
1688 return 0; 1729 return 0;
1689 1730
1690 intel_sdvo_connector->force_audio = val; 1731 intel_sdvo_connector->force_audio = i;
1691 1732
1692 if (val > 0 && intel_sdvo->has_hdmi_audio) 1733 if (i == 0)
1693 return 0; 1734 has_audio = intel_sdvo_detect_hdmi_audio(connector);
1694 if (val < 0 && !intel_sdvo->has_hdmi_audio) 1735 else
1736 has_audio = i > 0;
1737
1738 if (has_audio == intel_sdvo->has_hdmi_audio)
1695 return 0; 1739 return 0;
1696 1740
1697 intel_sdvo->has_hdmi_audio = val > 0; 1741 intel_sdvo->has_hdmi_audio = has_audio;
1698 goto done; 1742 goto done;
1699 } 1743 }
1700 1744
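
The intel_sdvo.c hunks above do two things. Detection now cross-checks the EDID against the connector type: a TMDS connector whose EDID is not flagged digital, or an analog connector whose EDID is, gets reported as disconnected, and intel_sdvo_get_ddc_modes() only publishes modes when the monitor's digital bit agrees with the connector. The force_audio property also becomes a real tri-state: 0 means auto and re-probes the EDID through the new intel_sdvo_detect_hdmi_audio() helper, a positive value forces audio on, a negative one forces it off. Below is a minimal, self-contained sketch of the agreement check only; EDID_INPUT_DIGITAL and edid_matches_connector() are illustrative stand-ins, not the kernel's identifiers.

    /* Sketch of the EDID/connector agreement rule added above.  Bit 7 of the
     * EDID "video input definition" byte is set for digital sinks; the patch
     * keeps the EDID only when that bit matches the connector type. */
    #include <stdbool.h>
    #include <stdint.h>

    #define EDID_INPUT_DIGITAL 0x80   /* illustrative define */

    static bool edid_matches_connector(uint8_t video_input, bool connector_is_digital)
    {
            bool monitor_is_digital = (video_input & EDID_INPUT_DIGITAL) != 0;
            return monitor_is_digital == connector_is_digital;
    }

In the patch the same comparison appears as connector_is_digital == monitor_is_digital, with both operands normalised through !!.
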
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 93206e4eaa6f..fe4a53a50b83 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1234,7 +1234,8 @@ static const struct drm_display_mode reported_modes[] = {
1234 * \return false if TV is disconnected. 1234 * \return false if TV is disconnected.
1235 */ 1235 */
1236static int 1236static int
1237intel_tv_detect_type (struct intel_tv *intel_tv) 1237intel_tv_detect_type (struct intel_tv *intel_tv,
1238 struct drm_connector *connector)
1238{ 1239{
1239 struct drm_encoder *encoder = &intel_tv->base.base; 1240 struct drm_encoder *encoder = &intel_tv->base.base;
1240 struct drm_device *dev = encoder->dev; 1241 struct drm_device *dev = encoder->dev;
@@ -1245,11 +1246,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1245 int type; 1246 int type;
1246 1247
1247 /* Disable TV interrupts around load detect or we'll recurse */ 1248 /* Disable TV interrupts around load detect or we'll recurse */
1248 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1249 if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
1249 i915_disable_pipestat(dev_priv, 0, 1250 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1250 PIPE_HOTPLUG_INTERRUPT_ENABLE | 1251 i915_disable_pipestat(dev_priv, 0,
1251 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1252 PIPE_HOTPLUG_INTERRUPT_ENABLE |
1252 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1253 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1254 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1255 }
1253 1256
1254 save_tv_dac = tv_dac = I915_READ(TV_DAC); 1257 save_tv_dac = tv_dac = I915_READ(TV_DAC);
1255 save_tv_ctl = tv_ctl = I915_READ(TV_CTL); 1258 save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1302,11 +1305,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1302 I915_WRITE(TV_CTL, save_tv_ctl); 1305 I915_WRITE(TV_CTL, save_tv_ctl);
1303 1306
1304 /* Restore interrupt config */ 1307 /* Restore interrupt config */
1305 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1308 if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
1306 i915_enable_pipestat(dev_priv, 0, 1309 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1307 PIPE_HOTPLUG_INTERRUPT_ENABLE | 1310 i915_enable_pipestat(dev_priv, 0,
1308 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1311 PIPE_HOTPLUG_INTERRUPT_ENABLE |
1309 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1312 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1313 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1314 }
1310 1315
1311 return type; 1316 return type;
1312} 1317}
@@ -1356,7 +1361,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1356 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); 1361 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
1357 1362
1358 if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { 1363 if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
1359 type = intel_tv_detect_type(intel_tv); 1364 type = intel_tv_detect_type(intel_tv, connector);
1360 } else if (force) { 1365 } else if (force) {
1361 struct drm_crtc *crtc; 1366 struct drm_crtc *crtc;
1362 int dpms_mode; 1367 int dpms_mode;
@@ -1364,7 +1369,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1364 crtc = intel_get_load_detect_pipe(&intel_tv->base, connector, 1369 crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
1365 &mode, &dpms_mode); 1370 &mode, &dpms_mode);
1366 if (crtc) { 1371 if (crtc) {
1367 type = intel_tv_detect_type(intel_tv); 1372 type = intel_tv_detect_type(intel_tv, connector);
1368 intel_release_load_detect_pipe(&intel_tv->base, connector, 1373 intel_release_load_detect_pipe(&intel_tv->base, connector,
1369 dpms_mode); 1374 dpms_mode);
1370 } else 1375 } else
@@ -1658,6 +1663,18 @@ intel_tv_init(struct drm_device *dev)
1658 intel_encoder = &intel_tv->base; 1663 intel_encoder = &intel_tv->base;
1659 connector = &intel_connector->base; 1664 connector = &intel_connector->base;
1660 1665
1666	/* The documentation, for the older chipsets at least, recommends
1667	 * using a polling method rather than hotplug detection for TVs.
1668	 * This is because in order to perform the hotplug detection, the PLLs
1669	 * for the TV must be kept alive, increasing power drain and starving
1670	 * bandwidth from other encoders. Notably, for instance, it causes
1671 * pipe underruns on Crestline when this encoder is supposedly idle.
1672 *
1673 * More recent chipsets favour HDMI rather than integrated S-Video.
1674 */
1675 connector->polled =
1676 DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
1677
1661 drm_connector_init(dev, connector, &intel_tv_connector_funcs, 1678 drm_connector_init(dev, connector, &intel_tv_connector_funcs,
1662 DRM_MODE_CONNECTOR_SVIDEO); 1679 DRM_MODE_CONNECTOR_SVIDEO);
1663 1680
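
The intel_tv.c change switches the integrated TV connector from hotplug interrupts to output polling (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT), for the power and bandwidth reasons given in the added comment, and intel_tv_detect_type() now receives the connector so the hotplug pipestat bits are only masked and restored when the connector really is hotplug-driven. A rough sketch of that guard, with a simplified flag standing in for the DRM one:

    /* Illustrative only: skip the interrupt save/restore when the connector
     * is polled rather than hotplug-driven. */
    #define POLL_HPD (1u << 0)

    static int detect_tv_type(unsigned int polled)
    {
            if (polled & POLL_HPD) {
                    /* mask TV hotplug interrupts here */
            }

            int type = 0;   /* run the load-detect sequence */

            if (polled & POLL_HPD) {
                    /* restore TV hotplug interrupts here */
            }
            return type;
    }
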
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 49e5e99917e2..6bdab891c64e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6228,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
6228 entry->tvconf.has_component_output = false; 6228 entry->tvconf.has_component_output = false;
6229 break; 6229 break;
6230 case OUTPUT_LVDS: 6230 case OUTPUT_LVDS:
6231 if ((conn & 0x00003f00) != 0x10) 6231 if ((conn & 0x00003f00) >> 8 != 0x10)
6232 entry->lvdsconf.use_straps_for_mode = true; 6232 entry->lvdsconf.use_straps_for_mode = true;
6233 entry->lvdsconf.use_power_scripts = true; 6233 entry->lvdsconf.use_power_scripts = true;
6234 break; 6234 break;
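
The nouveau_bios.c fix is a masked-versus-shifted bug: conn & 0x00003f00 still carries the field in bits 8..13, so comparing it against 0x10 could never be equal and use_straps_for_mode ended up set for every DCB 1.5 LVDS entry; shifting the field down first makes the comparison meaningful. A small sketch of the extraction, with a hypothetical helper name:

    #include <stdbool.h>
    #include <stdint.h>

    /* Extract the 6-bit LVDS config field (bits 8..13 of the DCB 1.5
     * connection word) before comparing, as the fix does. */
    static bool lvds_uses_straps(uint32_t conn)
    {
            uint32_t cfg = (conn & 0x00003f00) >> 8;   /* same as (conn >> 8) & 0x3f */
            return cfg != 0x10;
    }
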
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a7fae26f4654..d38a4d9f9b0b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -128,6 +128,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
128 } 128 }
129 } 129 }
130 130
131 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
131 nouveau_bo_placement_set(nvbo, flags, 0); 132 nouveau_bo_placement_set(nvbo, flags, 0);
132 133
133 nvbo->channel = chan; 134 nvbo->channel = chan;
@@ -166,17 +167,17 @@ static void
166set_placement_range(struct nouveau_bo *nvbo, uint32_t type) 167set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
167{ 168{
168 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 169 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
170 int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
169 171
170 if (dev_priv->card_type == NV_10 && 172 if (dev_priv->card_type == NV_10 &&
171 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) { 173 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
174 nvbo->bo.mem.num_pages < vram_pages / 2) {
172 /* 175 /*
173 * Make sure that the color and depth buffers are handled 176 * Make sure that the color and depth buffers are handled
174 * by independent memory controller units. Up to a 9x 177 * by independent memory controller units. Up to a 9x
175 * speed up when alpha-blending and depth-test are enabled 178 * speed up when alpha-blending and depth-test are enabled
176 * at the same time. 179 * at the same time.
177 */ 180 */
178 int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
179
180 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { 181 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
181 nvbo->placement.fpfn = vram_pages / 2; 182 nvbo->placement.fpfn = vram_pages / 2;
182 nvbo->placement.lpfn = ~0; 183 nvbo->placement.lpfn = ~0;
@@ -785,7 +786,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
785 if (ret) 786 if (ret)
786 goto out; 787 goto out;
787 788
788 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 789 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
789out: 790out:
790 ttm_bo_mem_put(bo, &tmp_mem); 791 ttm_bo_mem_put(bo, &tmp_mem);
791 return ret; 792 return ret;
@@ -811,11 +812,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
811 if (ret) 812 if (ret)
812 return ret; 813 return ret;
813 814
814 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem); 815 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
815 if (ret) 816 if (ret)
816 goto out; 817 goto out;
817 818
818 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 819 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
819 if (ret) 820 if (ret)
820 goto out; 821 goto out;
821 822
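
Three related nouveau_bo.c fixes: nvbo->bo.mem.num_pages is filled in before nouveau_bo_placement_set() so placement decisions can see the buffer's size; the NV1x colour/zeta split of VRAM is only applied when the object is smaller than half of VRAM (a larger buffer simply cannot fit in the restricted half); and the intermediate copies in move_flipd/move_flips now pass evict = true. A minimal sketch of the size gate, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* Only split VRAM between colour and depth buffers when the buffer
     * object fits in half of it. */
    static bool should_split_vram(uint64_t bo_bytes, uint64_t vram_bytes)
    {
            uint64_t bo_pages   = bo_bytes >> PAGE_SHIFT;
            uint64_t vram_pages = vram_bytes >> PAGE_SHIFT;

            return bo_pages < vram_pages / 2;
    }
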
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a21e00076839..390d82c3c4b0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
507 int high_w = 0, high_h = 0, high_v = 0; 507 int high_w = 0, high_h = 0, high_v = 0;
508 508
509 list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { 509 list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
510 mode->vrefresh = drm_mode_vrefresh(mode);
510 if (helper->mode_valid(connector, mode) != MODE_OK || 511 if (helper->mode_valid(connector, mode) != MODE_OK ||
511 (mode->flags & DRM_MODE_FLAG_INTERLACE)) 512 (mode->flags & DRM_MODE_FLAG_INTERLACE))
512 continue; 513 continue;
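
nouveau_connector_native_mode() compares probed modes partly by refresh rate, but probed modes do not necessarily arrive with mode->vrefresh filled in; the added line computes it with drm_mode_vrefresh() before the comparisons so they operate on real values. For reference, the refresh rate is essentially pixel clock divided by raster area; a simplified version of the calculation (the DRM helper adds rounding plus interlace, doublescan and vscan handling):

    /* Approximate vertical refresh in Hz from a mode's timings. */
    static int approx_vrefresh(int clock_khz, int htotal, int vtotal)
    {
            if (htotal <= 0 || vtotal <= 0)
                    return 0;
            return (clock_khz * 1000) / (htotal * vtotal);
    }
    /* e.g. 148500 kHz with 2200 x 1125 totals -> 60 Hz */
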
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index fb846a3fef15..4399e2f34db4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -443,7 +443,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
443 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 443 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
444 444
445 if (pm->hwmon) { 445 if (pm->hwmon) {
446 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); 446 sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
447 hwmon_device_unregister(pm->hwmon); 447 hwmon_device_unregister(pm->hwmon);
448 } 448 }
449#endif 449#endif
@@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev)
543 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 543 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
544 struct nouveau_pm_level *perflvl; 544 struct nouveau_pm_level *perflvl;
545 545
546 if (pm->cur == &pm->boot) 546 if (!pm->cur || pm->cur == &pm->boot)
547 return; 547 return;
548 548
549 perflvl = pm->cur; 549 perflvl = pm->cur;
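
Two small robustness fixes in nouveau_pm.c: the hwmon attribute group is removed from the PCI device's kobject rather than the hwmon class device's (presumably matching where it was created), and nouveau_pm_resume() now also bails out when no performance level was ever selected, avoiding a NULL dereference of pm->cur. A sketch of that guard, with made-up types:

    struct pm_level { int id; };
    struct pm_state { struct pm_level *cur; struct pm_level boot; };

    static void pm_resume(struct pm_state *pm)
    {
            if (!pm->cur || pm->cur == &pm->boot)
                    return;         /* nothing selected, or still on boot defaults */
            /* ... re-program *pm->cur here ... */
    }
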
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index ef23550407b5..c82db37d9f41 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
342 if (nv_encoder->dcb->type == OUTPUT_LVDS) { 342 if (nv_encoder->dcb->type == OUTPUT_LVDS) {
343 bool duallink, dummy; 343 bool duallink, dummy;
344 344
345 nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode-> 345 nouveau_bios_parse_lvds_table(dev, output_mode->clock,
346 clock, &duallink, &dummy); 346 &duallink, &dummy);
347 if (duallink) 347 if (duallink)
348 regp->fp_control |= (8 << 28); 348 regp->fp_control |= (8 << 28);
349 } else 349 } else
@@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
518 return; 518 return;
519 519
520 if (nv_encoder->dcb->lvdsconf.use_power_scripts) { 520 if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
521 struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
522
523 /* when removing an output, crtc may not be set, but PANEL_OFF 521 /* when removing an output, crtc may not be set, but PANEL_OFF
524 * must still be run 522 * must still be run
525 */ 523 */
@@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
527 nv04_dfp_get_bound_head(dev, nv_encoder->dcb); 525 nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
528 526
529 if (mode == DRM_MODE_DPMS_ON) { 527 if (mode == DRM_MODE_DPMS_ON) {
530 if (!nv_connector->native_mode) {
531 NV_ERROR(dev, "Not turning on LVDS without native mode\n");
532 return;
533 }
534 call_lvds_script(dev, nv_encoder->dcb, head, 528 call_lvds_script(dev, nv_encoder->dcb, head,
535 LVDS_PANEL_ON, nv_connector->native_mode->clock); 529 LVDS_PANEL_ON, nv_encoder->mode.clock);
536 } else 530 } else
537 /* pxclk of 0 is fine for PANEL_OFF, and for a 531 /* pxclk of 0 is fine for PANEL_OFF, and for a
538 * disconnected LVDS encoder there is no native_mode 532 * disconnected LVDS encoder there is no native_mode
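
The nv04_dfp.c hunks stop reaching through the connector for native_mode, which can be NULL for a disconnected or just-removed output, and instead use the mode the encoder itself is driving: output_mode->clock in mode_set and nv_encoder->mode.clock in the LVDS power scripts, which also lets the NV_ERROR early-return go away. A tiny sketch of the same prefer-owned-state idea, names made up:

    struct display_mode { int clock; };

    /* Prefer the mode programmed on the encoder over a connector pointer
     * that may be NULL. */
    static int panel_clock(const struct display_mode *encoder_mode,
                           const struct display_mode *native_mode /* may be NULL */)
    {
            if (encoder_mode)
                    return encoder_mode->clock;
            return native_mode ? native_mode->clock : 0;
    }
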
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 8870d72388c8..18d30c2c1aa6 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -211,18 +211,32 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
211 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; 211 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
212 212
213 switch (dev_priv->chipset) { 213 switch (dev_priv->chipset) {
214 case 0x40:
215 case 0x41: /* guess */
216 case 0x42:
217 case 0x43:
218 case 0x45: /* guess */
219 case 0x4e:
220 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
221 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
222 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
223 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
224 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
225 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
226 break;
214 case 0x44: 227 case 0x44:
215 case 0x4a: 228 case 0x4a:
216 case 0x4e:
217 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); 229 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
218 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); 230 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
219 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); 231 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
220 break; 232 break;
221
222 case 0x46: 233 case 0x46:
223 case 0x47: 234 case 0x47:
224 case 0x49: 235 case 0x49:
225 case 0x4b: 236 case 0x4b:
237 case 0x4c:
238 case 0x67:
239 default:
226 nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); 240 nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
227 nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); 241 nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
228 nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); 242 nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
@@ -230,15 +244,6 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
230 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); 244 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
231 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); 245 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
232 break; 246 break;
233
234 default:
235 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
236 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
237 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
238 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
239 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
240 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
241 break;
242 } 247 }
243} 248}
244 249
@@ -396,17 +401,20 @@ nv40_graph_init(struct drm_device *dev)
396 break; 401 break;
397 default: 402 default:
398 switch (dev_priv->chipset) { 403 switch (dev_priv->chipset) {
399 case 0x46: 404 case 0x41:
400 case 0x47: 405 case 0x42:
401 case 0x49: 406 case 0x43:
402 case 0x4b: 407 case 0x45:
403 nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); 408 case 0x4e:
404 nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); 409 case 0x44:
405 break; 410 case 0x4a:
406 default:
407 nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); 411 nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
408 nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); 412 nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
409 break; 413 break;
414 default:
415 nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
416 nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
417 break;
410 } 418 }
411 nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); 419 nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
412 nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); 420 nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 14e24e906ee8..0ea090f4244a 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -283,8 +283,7 @@ nv50_evo_create(struct drm_device *dev)
283 nv50_evo_channel_del(&dev_priv->evo); 283 nv50_evo_channel_del(&dev_priv->evo);
284 return ret; 284 return ret;
285 } 285 }
286 } else 286 } else {
287 if (dev_priv->chipset != 0x50) {
288 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19, 287 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
289 0, 0xffffffff, 0x00010000); 288 0, 0xffffffff, 0x00010000);
290 if (ret) { 289 if (ret) {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 842954fe74c5..a4e5e53e0a62 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -48,29 +48,29 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
48 48
49 switch (radeon_crtc->rmx_type) { 49 switch (radeon_crtc->rmx_type) {
50 case RMX_CENTER: 50 case RMX_CENTER:
51 args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; 51 args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
52 args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; 52 args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
53 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; 53 args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
54 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; 54 args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
55 break; 55 break;
56 case RMX_ASPECT: 56 case RMX_ASPECT:
57 a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; 57 a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
58 a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; 58 a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
59 59
60 if (a1 > a2) { 60 if (a1 > a2) {
61 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; 61 args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
62 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; 62 args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
63 } else if (a2 > a1) { 63 } else if (a2 > a1) {
64 args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; 64 args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
65 args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; 65 args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
66 } 66 }
67 break; 67 break;
68 case RMX_FULL: 68 case RMX_FULL:
69 default: 69 default:
70 args.usOverscanRight = radeon_crtc->h_border; 70 args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
71 args.usOverscanLeft = radeon_crtc->h_border; 71 args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
72 args.usOverscanBottom = radeon_crtc->v_border; 72 args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
73 args.usOverscanTop = radeon_crtc->v_border; 73 args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
74 break; 74 break;
75 } 75 }
76 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 76 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
@@ -419,23 +419,23 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
419 memset(&args, 0, sizeof(args)); 419 memset(&args, 0, sizeof(args));
420 420
421 if (ASIC_IS_DCE5(rdev)) { 421 if (ASIC_IS_DCE5(rdev)) {
422 args.v3.usSpreadSpectrumAmountFrac = 0; 422 args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
423 args.v3.ucSpreadSpectrumType = ss->type; 423 args.v3.ucSpreadSpectrumType = ss->type;
424 switch (pll_id) { 424 switch (pll_id) {
425 case ATOM_PPLL1: 425 case ATOM_PPLL1:
426 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; 426 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
427 args.v3.usSpreadSpectrumAmount = ss->amount; 427 args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
428 args.v3.usSpreadSpectrumStep = ss->step; 428 args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
429 break; 429 break;
430 case ATOM_PPLL2: 430 case ATOM_PPLL2:
431 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; 431 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
432 args.v3.usSpreadSpectrumAmount = ss->amount; 432 args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
433 args.v3.usSpreadSpectrumStep = ss->step; 433 args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
434 break; 434 break;
435 case ATOM_DCPLL: 435 case ATOM_DCPLL:
436 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; 436 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
437 args.v3.usSpreadSpectrumAmount = 0; 437 args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
438 args.v3.usSpreadSpectrumStep = 0; 438 args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
439 break; 439 break;
440 case ATOM_PPLL_INVALID: 440 case ATOM_PPLL_INVALID:
441 return; 441 return;
@@ -447,18 +447,18 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
447 switch (pll_id) { 447 switch (pll_id) {
448 case ATOM_PPLL1: 448 case ATOM_PPLL1:
449 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; 449 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
450 args.v2.usSpreadSpectrumAmount = ss->amount; 450 args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
451 args.v2.usSpreadSpectrumStep = ss->step; 451 args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
452 break; 452 break;
453 case ATOM_PPLL2: 453 case ATOM_PPLL2:
454 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; 454 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
455 args.v2.usSpreadSpectrumAmount = ss->amount; 455 args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
456 args.v2.usSpreadSpectrumStep = ss->step; 456 args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
457 break; 457 break;
458 case ATOM_DCPLL: 458 case ATOM_DCPLL:
459 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; 459 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
460 args.v2.usSpreadSpectrumAmount = 0; 460 args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
461 args.v2.usSpreadSpectrumStep = 0; 461 args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
462 break; 462 break;
463 case ATOM_PPLL_INVALID: 463 case ATOM_PPLL_INVALID:
464 return; 464 return;
@@ -538,7 +538,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
538 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 538 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
539 else 539 else
540 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 540 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
541
542 } 541 }
543 542
544 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 543 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -555,23 +554,28 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
555 dp_clock = dig_connector->dp_clock; 554 dp_clock = dig_connector->dp_clock;
556 } 555 }
557 } 556 }
558#if 0 /* doesn't work properly on some laptops */ 557
559 /* use recommended ref_div for ss */ 558 /* use recommended ref_div for ss */
560 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 559 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
561 if (ss_enabled) { 560 if (ss_enabled) {
562 if (ss->refdiv) { 561 if (ss->refdiv) {
562 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
563 pll->flags |= RADEON_PLL_USE_REF_DIV; 563 pll->flags |= RADEON_PLL_USE_REF_DIV;
564 pll->reference_div = ss->refdiv; 564 pll->reference_div = ss->refdiv;
565 if (ASIC_IS_AVIVO(rdev))
566 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
565 } 567 }
566 } 568 }
567 } 569 }
568#endif 570
569 if (ASIC_IS_AVIVO(rdev)) { 571 if (ASIC_IS_AVIVO(rdev)) {
570 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 572 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
571 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) 573 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
572 adjusted_clock = mode->clock * 2; 574 adjusted_clock = mode->clock * 2;
573 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 575 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
574 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; 576 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
577 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
578 pll->flags |= RADEON_PLL_IS_LCD;
575 } else { 579 } else {
576 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 580 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
577 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; 581 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -658,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
658 index, (uint32_t *)&args); 662 index, (uint32_t *)&args);
659 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; 663 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
660 if (args.v3.sOutput.ucRefDiv) { 664 if (args.v3.sOutput.ucRefDiv) {
665 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
661 pll->flags |= RADEON_PLL_USE_REF_DIV; 666 pll->flags |= RADEON_PLL_USE_REF_DIV;
662 pll->reference_div = args.v3.sOutput.ucRefDiv; 667 pll->reference_div = args.v3.sOutput.ucRefDiv;
663 } 668 }
664 if (args.v3.sOutput.ucPostDiv) { 669 if (args.v3.sOutput.ucPostDiv) {
670 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
665 pll->flags |= RADEON_PLL_USE_POST_DIV; 671 pll->flags |= RADEON_PLL_USE_POST_DIV;
666 pll->post_div = args.v3.sOutput.ucPostDiv; 672 pll->post_div = args.v3.sOutput.ucPostDiv;
667 } 673 }
@@ -715,14 +721,14 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
715 * SetPixelClock provides the dividers 721 * SetPixelClock provides the dividers
716 */ 722 */
717 args.v5.ucCRTC = ATOM_CRTC_INVALID; 723 args.v5.ucCRTC = ATOM_CRTC_INVALID;
718 args.v5.usPixelClock = dispclk; 724 args.v5.usPixelClock = cpu_to_le16(dispclk);
719 args.v5.ucPpll = ATOM_DCPLL; 725 args.v5.ucPpll = ATOM_DCPLL;
720 break; 726 break;
721 case 6: 727 case 6:
722 /* if the default dcpll clock is specified, 728 /* if the default dcpll clock is specified,
723 * SetPixelClock provides the dividers 729 * SetPixelClock provides the dividers
724 */ 730 */
725 args.v6.ulDispEngClkFreq = dispclk; 731 args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
726 args.v6.ucPpll = ATOM_DCPLL; 732 args.v6.ucPpll = ATOM_DCPLL;
727 break; 733 break;
728 default: 734 default:
@@ -951,8 +957,12 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
951 /* adjust pixel clock as needed */ 957 /* adjust pixel clock as needed */
952 adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); 958 adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
953 959
954 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 960 if (ASIC_IS_AVIVO(rdev))
955 &ref_div, &post_div); 961 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
962 &ref_div, &post_div);
963 else
964 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
965 &ref_div, &post_div);
956 966
957 atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss); 967 atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
958 968
@@ -981,9 +991,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
981 } 991 }
982} 992}
983 993
984static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, 994static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
985 struct drm_framebuffer *fb, 995 struct drm_framebuffer *fb,
986 int x, int y, int atomic) 996 int x, int y, int atomic)
987{ 997{
988 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 998 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
989 struct drm_device *dev = crtc->dev; 999 struct drm_device *dev = crtc->dev;
@@ -1123,12 +1133,6 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
1123 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1133 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1124 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); 1134 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
1125 1135
1126 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
1127 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
1128 EVERGREEN_INTERLEAVE_EN);
1129 else
1130 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
1131
1132 if (!atomic && fb && fb != crtc->fb) { 1136 if (!atomic && fb && fb != crtc->fb) {
1133 radeon_fb = to_radeon_framebuffer(fb); 1137 radeon_fb = to_radeon_framebuffer(fb);
1134 rbo = radeon_fb->obj->driver_private; 1138 rbo = radeon_fb->obj->driver_private;
@@ -1286,12 +1290,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1286 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1290 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1287 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); 1291 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
1288 1292
1289 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
1290 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
1291 AVIVO_D1MODE_INTERLEAVE_EN);
1292 else
1293 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
1294
1295 if (!atomic && fb && fb != crtc->fb) { 1293 if (!atomic && fb && fb != crtc->fb) {
1296 radeon_fb = to_radeon_framebuffer(fb); 1294 radeon_fb = to_radeon_framebuffer(fb);
1297 rbo = radeon_fb->obj->driver_private; 1295 rbo = radeon_fb->obj->driver_private;
@@ -1315,7 +1313,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
1315 struct radeon_device *rdev = dev->dev_private; 1313 struct radeon_device *rdev = dev->dev_private;
1316 1314
1317 if (ASIC_IS_DCE4(rdev)) 1315 if (ASIC_IS_DCE4(rdev))
1318 return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0); 1316 return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
1319 else if (ASIC_IS_AVIVO(rdev)) 1317 else if (ASIC_IS_AVIVO(rdev))
1320 return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0); 1318 return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
1321 else 1319 else
@@ -1330,7 +1328,7 @@ int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
1330 struct radeon_device *rdev = dev->dev_private; 1328 struct radeon_device *rdev = dev->dev_private;
1331 1329
1332 if (ASIC_IS_DCE4(rdev)) 1330 if (ASIC_IS_DCE4(rdev))
1333 return evergreen_crtc_do_set_base(crtc, fb, x, y, 1); 1331 return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
1334 else if (ASIC_IS_AVIVO(rdev)) 1332 else if (ASIC_IS_AVIVO(rdev))
1335 return avivo_crtc_do_set_base(crtc, fb, x, y, 1); 1333 return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
1336 else 1334 else
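
Most of the atombios_crtc.c churn is byte-order correctness: the ATOM BIOS interpreter consumes its argument structures in little-endian layout, so every 16/32-bit field handed to atom_execute_table() is wrapped in cpu_to_le16()/cpu_to_le32(), a no-op on x86 but a swap on big-endian hosts. The previously #if 0'd spread-spectrum ref_div handling comes back with extra PLL flags, PLL computation is split into radeon_compute_pll_avivo()/_legacy(), and evergreen_crtc_do_set_base() is renamed dce4_crtc_do_set_base(). A portable userspace sketch of the conversion (the kernel has its own cpu_to_le16(); this stand-in builds the little-endian byte layout by hand):

    #include <stdint.h>
    #include <string.h>

    static uint16_t to_le16(uint16_t v)
    {
            uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };
            uint16_t out;

            memcpy(&out, b, sizeof(out));
            return out;     /* stores back out as LSB-first regardless of host order */
    }
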
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 677af91b555c..d270b3ff896b 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -97,26 +97,29 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
97} 97}
98 98
99/* get temperature in millidegrees */ 99/* get temperature in millidegrees */
100u32 evergreen_get_temp(struct radeon_device *rdev) 100int evergreen_get_temp(struct radeon_device *rdev)
101{ 101{
102 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> 102 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
103 ASIC_T_SHIFT; 103 ASIC_T_SHIFT;
104 u32 actual_temp = 0; 104 u32 actual_temp = 0;
105 105
106 if ((temp >> 10) & 1) 106 if (temp & 0x400)
107 actual_temp = 0; 107 actual_temp = -256;
108 else if ((temp >> 9) & 1) 108 else if (temp & 0x200)
109 actual_temp = 255; 109 actual_temp = 255;
110 else 110 else if (temp & 0x100) {
111 actual_temp = (temp >> 1) & 0xff; 111 actual_temp = temp & 0x1ff;
112 actual_temp |= ~0x1ff;
113 } else
114 actual_temp = temp & 0xff;
112 115
113 return actual_temp * 1000; 116 return (actual_temp * 1000) / 2;
114} 117}
115 118
116u32 sumo_get_temp(struct radeon_device *rdev) 119int sumo_get_temp(struct radeon_device *rdev)
117{ 120{
118 u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff; 121 u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
119 u32 actual_temp = (temp >> 1) & 0xff; 122 int actual_temp = temp - 49;
120 123
121 return actual_temp * 1000; 124 return actual_temp * 1000;
122} 125}
@@ -1182,6 +1185,22 @@ static void evergreen_mc_program(struct radeon_device *rdev)
1182/* 1185/*
1183 * CP. 1186 * CP.
1184 */ 1187 */
1188void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1189{
1190 /* set to DX10/11 mode */
1191 radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
1192 radeon_ring_write(rdev, 1);
1193 /* FIXME: implement */
1194 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1195 radeon_ring_write(rdev,
1196#ifdef __BIG_ENDIAN
1197 (2 << 0) |
1198#endif
1199 (ib->gpu_addr & 0xFFFFFFFC));
1200 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
1201 radeon_ring_write(rdev, ib->length_dw);
1202}
1203
1185 1204
1186static int evergreen_cp_load_microcode(struct radeon_device *rdev) 1205static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1187{ 1206{
@@ -1192,7 +1211,11 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1192 return -EINVAL; 1211 return -EINVAL;
1193 1212
1194 r700_cp_stop(rdev); 1213 r700_cp_stop(rdev);
1195 WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); 1214 WREG32(CP_RB_CNTL,
1215#ifdef __BIG_ENDIAN
1216 BUF_SWAP_32BIT |
1217#endif
1218 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1196 1219
1197 fw_data = (const __be32 *)rdev->pfp_fw->data; 1220 fw_data = (const __be32 *)rdev->pfp_fw->data;
1198 WREG32(CP_PFP_UCODE_ADDR, 0); 1221 WREG32(CP_PFP_UCODE_ADDR, 0);
@@ -1233,7 +1256,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
1233 cp_me = 0xff; 1256 cp_me = 0xff;
1234 WREG32(CP_ME_CNTL, cp_me); 1257 WREG32(CP_ME_CNTL, cp_me);
1235 1258
1236 r = radeon_ring_lock(rdev, evergreen_default_size + 15); 1259 r = radeon_ring_lock(rdev, evergreen_default_size + 19);
1237 if (r) { 1260 if (r) {
1238 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 1261 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1239 return r; 1262 return r;
@@ -1266,6 +1289,11 @@ static int evergreen_cp_start(struct radeon_device *rdev)
1266 radeon_ring_write(rdev, 0xffffffff); 1289 radeon_ring_write(rdev, 0xffffffff);
1267 radeon_ring_write(rdev, 0xffffffff); 1290 radeon_ring_write(rdev, 0xffffffff);
1268 1291
1292 radeon_ring_write(rdev, 0xc0026900);
1293 radeon_ring_write(rdev, 0x00000316);
1294 radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1295 radeon_ring_write(rdev, 0x00000010); /* */
1296
1269 radeon_ring_unlock_commit(rdev); 1297 radeon_ring_unlock_commit(rdev);
1270 1298
1271 return 0; 1299 return 0;
@@ -1306,7 +1334,11 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1306 WREG32(CP_RB_WPTR, 0); 1334 WREG32(CP_RB_WPTR, 0);
1307 1335
1308	/* set the wb address whether it's enabled or not */ 1336	/* set the wb address whether it's enabled or not */
1309 WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); 1337 WREG32(CP_RB_RPTR_ADDR,
1338#ifdef __BIG_ENDIAN
1339 RB_RPTR_SWAP(2) |
1340#endif
1341 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
1310 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 1342 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1311 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 1343 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1312 1344
@@ -2072,6 +2104,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2072 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); 2104 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
2073 2105
2074 WREG32(VGT_GS_VERTEX_REUSE, 16); 2106 WREG32(VGT_GS_VERTEX_REUSE, 16);
2107 WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
2075 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 2108 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2076 2109
2077 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); 2110 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
@@ -2606,8 +2639,8 @@ restart_ih:
2606 while (rptr != wptr) { 2639 while (rptr != wptr) {
2607 /* wptr/rptr are in bytes! */ 2640 /* wptr/rptr are in bytes! */
2608 ring_index = rptr / 4; 2641 ring_index = rptr / 4;
2609 src_id = rdev->ih.ring[ring_index] & 0xff; 2642 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
2610 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; 2643 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
2611 2644
2612 switch (src_id) { 2645 switch (src_id) {
2613 case 1: /* D1 vblank/vline */ 2646 case 1: /* D1 vblank/vline */
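
evergreen_get_temp() and sumo_get_temp() now return int because the sensor can legitimately read below zero, and the rest of the evergreen.c changes add big-endian handling for the CP ring, IB and IH paths plus the new evergreen_ring_ib_execute(). The interesting part is the decode: the raw ASIC_T field is a signed reading in half-degree steps, with bit 10 meaning railed low (-256), bit 9 railed high (255) and bit 8 acting as the sign bit for in-range values, which the fix propagates by OR-ing in ~0x1ff before scaling to millidegrees; Sumo's sensor simply reports degrees offset by 49. A standalone sketch that mirrors those branches:

    #include <stdint.h>

    /* Decode the Evergreen thermal status field into millidegrees C,
     * following the same branches as the fixed kernel code. */
    static int evergreen_decode_temp(uint32_t temp /* 11-bit raw field */)
    {
            int actual_temp;

            if (temp & 0x400)               /* railed low */
                    actual_temp = -256;
            else if (temp & 0x200)          /* railed high */
                    actual_temp = 255;
            else if (temp & 0x100)          /* negative: sign-extend the 9-bit value */
                    actual_temp = (int)(temp & 0x1ff) | ~0x1ff;
            else
                    actual_temp = (int)(temp & 0xff);

            return (actual_temp * 1000) / 2;   /* raw units are half degrees */
    }
    /* e.g. raw 0x050 -> 80 half-degrees -> 40000 (40.0 C) */
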
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index d4d4db49a8b8..2adfb03f479b 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -55,7 +55,7 @@ set_render_target(struct radeon_device *rdev, int format,
55 if (h < 8) 55 if (h < 8)
56 h = 8; 56 h = 8;
57 57
58 cb_color_info = ((format << 2) | (1 << 24)); 58 cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
59 pitch = (w / 8) - 1; 59 pitch = (w / 8) - 1;
60 slice = ((w * h) / 64) - 1; 60 slice = ((w * h) / 64) - 1;
61 61
@@ -133,6 +133,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
133 133
134 /* high addr, stride */ 134 /* high addr, stride */
135 sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); 135 sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
136#ifdef __BIG_ENDIAN
137 sq_vtx_constant_word2 |= (2 << 30);
138#endif
136 /* xyzw swizzles */ 139 /* xyzw swizzles */
137 sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12); 140 sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
138 141
@@ -173,7 +176,7 @@ set_tex_resource(struct radeon_device *rdev,
173 sq_tex_resource_word0 = (1 << 0); /* 2D */ 176 sq_tex_resource_word0 = (1 << 0); /* 2D */
174 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | 177 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
175 ((w - 1) << 18)); 178 ((w - 1) << 18));
176 sq_tex_resource_word1 = ((h - 1) << 0); 179 sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
177 /* xyzw swizzles */ 180 /* xyzw swizzles */
178 sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25); 181 sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
179 182
@@ -221,7 +224,11 @@ draw_auto(struct radeon_device *rdev)
221 radeon_ring_write(rdev, DI_PT_RECTLIST); 224 radeon_ring_write(rdev, DI_PT_RECTLIST);
222 225
223 radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); 226 radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
224 radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); 227 radeon_ring_write(rdev,
228#ifdef __BIG_ENDIAN
229 (2 << 2) |
230#endif
231 DI_INDEX_SIZE_16_BIT);
225 232
226 radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); 233 radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
227 radeon_ring_write(rdev, 1); 234 radeon_ring_write(rdev, 1);
@@ -232,7 +239,7 @@ draw_auto(struct radeon_device *rdev)
232 239
233} 240}
234 241
235/* emits 34 */ 242/* emits 36 */
236static void 243static void
237set_default_state(struct radeon_device *rdev) 244set_default_state(struct radeon_device *rdev)
238{ 245{
@@ -499,6 +506,10 @@ set_default_state(struct radeon_device *rdev)
499 radeon_ring_write(rdev, 0x00000000); 506 radeon_ring_write(rdev, 0x00000000);
500 radeon_ring_write(rdev, 0x00000000); 507 radeon_ring_write(rdev, 0x00000000);
501 508
509 /* set to DX10/11 mode */
510 radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
511 radeon_ring_write(rdev, 1);
512
502 /* emit an IB pointing at default state */ 513 /* emit an IB pointing at default state */
503 dwords = ALIGN(rdev->r600_blit.state_len, 0x10); 514 dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
504 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; 515 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
@@ -537,7 +548,7 @@ static inline uint32_t i2f(uint32_t input)
537int evergreen_blit_init(struct radeon_device *rdev) 548int evergreen_blit_init(struct radeon_device *rdev)
538{ 549{
539 u32 obj_size; 550 u32 obj_size;
540 int r, dwords; 551 int i, r, dwords;
541 void *ptr; 552 void *ptr;
542 u32 packet2s[16]; 553 u32 packet2s[16];
543 int num_packet2s = 0; 554 int num_packet2s = 0;
@@ -553,7 +564,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
553 564
554 dwords = rdev->r600_blit.state_len; 565 dwords = rdev->r600_blit.state_len;
555 while (dwords & 0xf) { 566 while (dwords & 0xf) {
556 packet2s[num_packet2s++] = PACKET2(0); 567 packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
557 dwords++; 568 dwords++;
558 } 569 }
559 570
@@ -594,8 +605,10 @@ int evergreen_blit_init(struct radeon_device *rdev)
594 if (num_packet2s) 605 if (num_packet2s)
595 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), 606 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
596 packet2s, num_packet2s * 4); 607 packet2s, num_packet2s * 4);
597 memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4); 608 for (i = 0; i < evergreen_vs_size; i++)
598 memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4); 609 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
610 for (i = 0; i < evergreen_ps_size; i++)
611 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
599 radeon_bo_kunmap(rdev->r600_blit.shader_obj); 612 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
600 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 613 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
601 614
@@ -679,7 +692,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
679 /* calculate number of loops correctly */ 692 /* calculate number of loops correctly */
680 ring_size = num_loops * dwords_per_loop; 693 ring_size = num_loops * dwords_per_loop;
681 /* set default + shaders */ 694 /* set default + shaders */
682 ring_size += 50; /* shaders + def state */ 695 ring_size += 52; /* shaders + def state */
683 ring_size += 10; /* fence emit for VB IB */ 696 ring_size += 10; /* fence emit for VB IB */
684 ring_size += 5; /* done copy */ 697 ring_size += 5; /* done copy */
685 ring_size += 10; /* fence emit for done copy */ 698 ring_size += 10; /* fence emit for done copy */
@@ -687,7 +700,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
687 if (r) 700 if (r)
688 return r; 701 return r;
689 702
690 set_default_state(rdev); /* 34 */ 703 set_default_state(rdev); /* 36 */
691 set_shaders(rdev); /* 16 */ 704 set_shaders(rdev); /* 16 */
692 return 0; 705 return 0;
693} 706}
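
The blit changes are mostly big-endian enablement with a couple of functional tweaks riding along: the default state now also emits a PACKET3_MODE_CONTROL to select DX10/11 mode, which bumps the emitted dword counts from 34/50 to 36/52, and single bits are added to the render-target and texture descriptor words. Because the shader programs and PACKET2 padding are copied into the buffer object through a CPU mapping, they must land in the GPU's little-endian order, hence the per-dword cpu_to_le32() loop replacing the raw memcpy(). A hedged userspace sketch of that loop, using a hand-rolled to_le32() in place of the kernel helper:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static uint32_t to_le32(uint32_t v)
    {
            uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
                             (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
            uint32_t out;

            memcpy(&out, b, sizeof(out));
            return out;
    }

    /* Copy shader dwords into a CPU mapping of the blit BO in LE order. */
    static void copy_shader_le(uint32_t *dst, const uint32_t *src, size_t ndw)
    {
            for (size_t i = 0; i < ndw; i++)
                    dst[i] = to_le32(src[i]);
    }
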
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index ef1d28c07fbf..3a10399e0066 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -311,11 +311,19 @@ const u32 evergreen_vs[] =
311 0x00000000, 311 0x00000000,
312 0x3c000000, 312 0x3c000000,
313 0x67961001, 313 0x67961001,
314#ifdef __BIG_ENDIAN
315 0x000a0000,
316#else
314 0x00080000, 317 0x00080000,
318#endif
315 0x00000000, 319 0x00000000,
316 0x1c000000, 320 0x1c000000,
317 0x67961000, 321 0x67961000,
322#ifdef __BIG_ENDIAN
323 0x00020008,
324#else
318 0x00000008, 325 0x00000008,
326#endif
319 0x00000000, 327 0x00000000,
320}; 328};
321 329
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 36d32d83d866..eb4acf4528ff 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -98,6 +98,7 @@
98#define BUF_SWAP_32BIT (2 << 16) 98#define BUF_SWAP_32BIT (2 << 16)
99#define CP_RB_RPTR 0x8700 99#define CP_RB_RPTR 0x8700
100#define CP_RB_RPTR_ADDR 0xC10C 100#define CP_RB_RPTR_ADDR 0xC10C
101#define RB_RPTR_SWAP(x) ((x) << 0)
101#define CP_RB_RPTR_ADDR_HI 0xC110 102#define CP_RB_RPTR_ADDR_HI 0xC110
102#define CP_RB_RPTR_WR 0xC108 103#define CP_RB_RPTR_WR 0xC108
103#define CP_RB_WPTR 0xC114 104#define CP_RB_WPTR 0xC114
@@ -240,6 +241,7 @@
240#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0) 241#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
241#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16) 242#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
242#define PA_SC_LINE_STIPPLE 0x28A0C 243#define PA_SC_LINE_STIPPLE 0x28A0C
244#define PA_SU_LINE_STIPPLE_VALUE 0x8A60
243#define PA_SC_LINE_STIPPLE_STATE 0x8B10 245#define PA_SC_LINE_STIPPLE_STATE 0x8B10
244 246
245#define SCRATCH_REG0 0x8500 247#define SCRATCH_REG0 0x8500
@@ -652,6 +654,7 @@
652#define PACKET3_DISPATCH_DIRECT 0x15 654#define PACKET3_DISPATCH_DIRECT 0x15
653#define PACKET3_DISPATCH_INDIRECT 0x16 655#define PACKET3_DISPATCH_INDIRECT 0x16
654#define PACKET3_INDIRECT_BUFFER_END 0x17 656#define PACKET3_INDIRECT_BUFFER_END 0x17
657#define PACKET3_MODE_CONTROL 0x18
655#define PACKET3_SET_PREDICATION 0x20 658#define PACKET3_SET_PREDICATION 0x20
656#define PACKET3_REG_RMW 0x21 659#define PACKET3_REG_RMW 0x21
657#define PACKET3_COND_EXEC 0x22 660#define PACKET3_COND_EXEC 0x22
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 607241c6a8a9..5a82b6b75849 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -673,8 +673,10 @@ static int parser_auth(struct table *t, const char *filename)
673 last_reg = strtol(last_reg_s, NULL, 16); 673 last_reg = strtol(last_reg_s, NULL, 16);
674 674
675 do { 675 do {
676 if (fgets(buf, 1024, file) == NULL) 676 if (fgets(buf, 1024, file) == NULL) {
677 fclose(file);
677 return -1; 678 return -1;
679 }
678 len = strlen(buf); 680 len = strlen(buf);
679 if (ftell(file) == end) 681 if (ftell(file) == end)
680 done = 1; 682 done = 1;
@@ -685,6 +687,7 @@ static int parser_auth(struct table *t, const char *filename)
685 fprintf(stderr, 687 fprintf(stderr,
686 "Error matching regular expression %d in %s\n", 688 "Error matching regular expression %d in %s\n",
687 r, filename); 689 r, filename);
690 fclose(file);
688 return -1; 691 return -1;
689 } else { 692 } else {
690 buf[match[0].rm_eo] = 0; 693 buf[match[0].rm_eo] = 0;
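
mkregtable is a host-side build tool, and parser_auth() could return from inside its parse loop without closing the file it had opened; the fix adds fclose(file) on both early-return paths. The same leak is often avoided with a single exit point instead of per-path cleanup, sketched here on a simplified parser (the checks are placeholders, not mkregtable's logic):

    #include <stdio.h>
    #include <string.h>

    static int parse_file(const char *filename)
    {
            char buf[1024];
            int ret = -1;
            FILE *file = fopen(filename, "r");

            if (!file)
                    return -1;

            while (fgets(buf, sizeof(buf), file)) {
                    if (buf[0] == '#')
                            continue;               /* skip comments */
                    if (strlen(buf) == sizeof(buf) - 1)
                            goto out;               /* overlong line: bail, file still closed */
            }
            ret = 0;
    out:
            fclose(file);
            return ret;
    }
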
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5f15820efe12..56deae5bf02e 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1427,6 +1427,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1427 } 1427 }
1428 track->zb.robj = reloc->robj; 1428 track->zb.robj = reloc->robj;
1429 track->zb.offset = idx_value; 1429 track->zb.offset = idx_value;
1430 track->zb_dirty = true;
1430 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1431 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1431 break; 1432 break;
1432 case RADEON_RB3D_COLOROFFSET: 1433 case RADEON_RB3D_COLOROFFSET:
@@ -1439,6 +1440,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1439 } 1440 }
1440 track->cb[0].robj = reloc->robj; 1441 track->cb[0].robj = reloc->robj;
1441 track->cb[0].offset = idx_value; 1442 track->cb[0].offset = idx_value;
1443 track->cb_dirty = true;
1442 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1444 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1443 break; 1445 break;
1444 case RADEON_PP_TXOFFSET_0: 1446 case RADEON_PP_TXOFFSET_0:
@@ -1454,6 +1456,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1454 } 1456 }
1455 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1457 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1456 track->textures[i].robj = reloc->robj; 1458 track->textures[i].robj = reloc->robj;
1459 track->tex_dirty = true;
1457 break; 1460 break;
1458 case RADEON_PP_CUBIC_OFFSET_T0_0: 1461 case RADEON_PP_CUBIC_OFFSET_T0_0:
1459 case RADEON_PP_CUBIC_OFFSET_T0_1: 1462 case RADEON_PP_CUBIC_OFFSET_T0_1:
@@ -1471,6 +1474,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1471 track->textures[0].cube_info[i].offset = idx_value; 1474 track->textures[0].cube_info[i].offset = idx_value;
1472 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1475 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1473 track->textures[0].cube_info[i].robj = reloc->robj; 1476 track->textures[0].cube_info[i].robj = reloc->robj;
1477 track->tex_dirty = true;
1474 break; 1478 break;
1475 case RADEON_PP_CUBIC_OFFSET_T1_0: 1479 case RADEON_PP_CUBIC_OFFSET_T1_0:
1476 case RADEON_PP_CUBIC_OFFSET_T1_1: 1480 case RADEON_PP_CUBIC_OFFSET_T1_1:
@@ -1488,6 +1492,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1488 track->textures[1].cube_info[i].offset = idx_value; 1492 track->textures[1].cube_info[i].offset = idx_value;
1489 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1493 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1490 track->textures[1].cube_info[i].robj = reloc->robj; 1494 track->textures[1].cube_info[i].robj = reloc->robj;
1495 track->tex_dirty = true;
1491 break; 1496 break;
1492 case RADEON_PP_CUBIC_OFFSET_T2_0: 1497 case RADEON_PP_CUBIC_OFFSET_T2_0:
1493 case RADEON_PP_CUBIC_OFFSET_T2_1: 1498 case RADEON_PP_CUBIC_OFFSET_T2_1:
@@ -1505,9 +1510,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1505 track->textures[2].cube_info[i].offset = idx_value; 1510 track->textures[2].cube_info[i].offset = idx_value;
1506 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1511 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1507 track->textures[2].cube_info[i].robj = reloc->robj; 1512 track->textures[2].cube_info[i].robj = reloc->robj;
1513 track->tex_dirty = true;
1508 break; 1514 break;
1509 case RADEON_RE_WIDTH_HEIGHT: 1515 case RADEON_RE_WIDTH_HEIGHT:
1510 track->maxy = ((idx_value >> 16) & 0x7FF); 1516 track->maxy = ((idx_value >> 16) & 0x7FF);
1517 track->cb_dirty = true;
1518 track->zb_dirty = true;
1511 break; 1519 break;
1512 case RADEON_RB3D_COLORPITCH: 1520 case RADEON_RB3D_COLORPITCH:
1513 r = r100_cs_packet_next_reloc(p, &reloc); 1521 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1528,9 +1536,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1528 ib[idx] = tmp; 1536 ib[idx] = tmp;
1529 1537
1530 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; 1538 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
1539 track->cb_dirty = true;
1531 break; 1540 break;
1532 case RADEON_RB3D_DEPTHPITCH: 1541 case RADEON_RB3D_DEPTHPITCH:
1533 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; 1542 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
1543 track->zb_dirty = true;
1534 break; 1544 break;
1535 case RADEON_RB3D_CNTL: 1545 case RADEON_RB3D_CNTL:
1536 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 1546 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -1555,6 +1565,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1555 return -EINVAL; 1565 return -EINVAL;
1556 } 1566 }
1557 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); 1567 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
1568 track->cb_dirty = true;
1569 track->zb_dirty = true;
1558 break; 1570 break;
1559 case RADEON_RB3D_ZSTENCILCNTL: 1571 case RADEON_RB3D_ZSTENCILCNTL:
1560 switch (idx_value & 0xf) { 1572 switch (idx_value & 0xf) {
@@ -1572,6 +1584,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1572 default: 1584 default:
1573 break; 1585 break;
1574 } 1586 }
1587 track->zb_dirty = true;
1575 break; 1588 break;
1576 case RADEON_RB3D_ZPASS_ADDR: 1589 case RADEON_RB3D_ZPASS_ADDR:
1577 r = r100_cs_packet_next_reloc(p, &reloc); 1590 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1588,6 +1601,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1588 uint32_t temp = idx_value >> 4; 1601 uint32_t temp = idx_value >> 4;
1589 for (i = 0; i < track->num_texture; i++) 1602 for (i = 0; i < track->num_texture; i++)
1590 track->textures[i].enabled = !!(temp & (1 << i)); 1603 track->textures[i].enabled = !!(temp & (1 << i));
1604 track->tex_dirty = true;
1591 } 1605 }
1592 break; 1606 break;
1593 case RADEON_SE_VF_CNTL: 1607 case RADEON_SE_VF_CNTL:
@@ -1602,12 +1616,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1602 i = (reg - RADEON_PP_TEX_SIZE_0) / 8; 1616 i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
1603 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; 1617 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
1604 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 1618 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
1619 track->tex_dirty = true;
1605 break; 1620 break;
1606 case RADEON_PP_TEX_PITCH_0: 1621 case RADEON_PP_TEX_PITCH_0:
1607 case RADEON_PP_TEX_PITCH_1: 1622 case RADEON_PP_TEX_PITCH_1:
1608 case RADEON_PP_TEX_PITCH_2: 1623 case RADEON_PP_TEX_PITCH_2:
1609 i = (reg - RADEON_PP_TEX_PITCH_0) / 8; 1624 i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
1610 track->textures[i].pitch = idx_value + 32; 1625 track->textures[i].pitch = idx_value + 32;
1626 track->tex_dirty = true;
1611 break; 1627 break;
1612 case RADEON_PP_TXFILTER_0: 1628 case RADEON_PP_TXFILTER_0:
1613 case RADEON_PP_TXFILTER_1: 1629 case RADEON_PP_TXFILTER_1:
@@ -1621,6 +1637,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1621 tmp = (idx_value >> 27) & 0x7; 1637 tmp = (idx_value >> 27) & 0x7;
1622 if (tmp == 2 || tmp == 6) 1638 if (tmp == 2 || tmp == 6)
1623 track->textures[i].roundup_h = false; 1639 track->textures[i].roundup_h = false;
1640 track->tex_dirty = true;
1624 break; 1641 break;
1625 case RADEON_PP_TXFORMAT_0: 1642 case RADEON_PP_TXFORMAT_0:
1626 case RADEON_PP_TXFORMAT_1: 1643 case RADEON_PP_TXFORMAT_1:
@@ -1673,6 +1690,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1673 } 1690 }
1674 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 1691 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
1675 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); 1692 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
1693 track->tex_dirty = true;
1676 break; 1694 break;
1677 case RADEON_PP_CUBIC_FACES_0: 1695 case RADEON_PP_CUBIC_FACES_0:
1678 case RADEON_PP_CUBIC_FACES_1: 1696 case RADEON_PP_CUBIC_FACES_1:
@@ -1683,6 +1701,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1683 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 1701 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
1684 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); 1702 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
1685 } 1703 }
1704 track->tex_dirty = true;
1686 break; 1705 break;
1687 default: 1706 default:
1688 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 1707 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -3318,9 +3337,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3318 unsigned long size; 3337 unsigned long size;
3319 unsigned prim_walk; 3338 unsigned prim_walk;
3320 unsigned nverts; 3339 unsigned nverts;
3321 unsigned num_cb = track->num_cb; 3340 unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
3322 3341
3323 if (!track->zb_cb_clear && !track->color_channel_mask && 3342 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
3324 !track->blend_read_enable) 3343 !track->blend_read_enable)
3325 num_cb = 0; 3344 num_cb = 0;
3326 3345
@@ -3341,7 +3360,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3341 return -EINVAL; 3360 return -EINVAL;
3342 } 3361 }
3343 } 3362 }
3344 if (track->z_enabled) { 3363 track->cb_dirty = false;
3364
3365 if (track->zb_dirty && track->z_enabled) {
3345 if (track->zb.robj == NULL) { 3366 if (track->zb.robj == NULL) {
3346 DRM_ERROR("[drm] No buffer for z buffer !\n"); 3367 DRM_ERROR("[drm] No buffer for z buffer !\n");
3347 return -EINVAL; 3368 return -EINVAL;
@@ -3358,6 +3379,28 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3358 return -EINVAL; 3379 return -EINVAL;
3359 } 3380 }
3360 } 3381 }
3382 track->zb_dirty = false;
3383
3384 if (track->aa_dirty && track->aaresolve) {
3385 if (track->aa.robj == NULL) {
3386 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
3387 return -EINVAL;
3388 }
3389 /* I believe the format comes from colorbuffer0. */
3390 size = track->aa.pitch * track->cb[0].cpp * track->maxy;
3391 size += track->aa.offset;
3392 if (size > radeon_bo_size(track->aa.robj)) {
3393 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
3394 "(need %lu have %lu) !\n", i, size,
3395 radeon_bo_size(track->aa.robj));
3396 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
3397 i, track->aa.pitch, track->cb[0].cpp,
3398 track->aa.offset, track->maxy);
3399 return -EINVAL;
3400 }
3401 }
3402 track->aa_dirty = false;
3403
3361 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 3404 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
3362 if (track->vap_vf_cntl & (1 << 14)) { 3405 if (track->vap_vf_cntl & (1 << 14)) {
3363 nverts = track->vap_alt_nverts; 3406 nverts = track->vap_alt_nverts;
@@ -3417,13 +3460,23 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3417 prim_walk); 3460 prim_walk);
3418 return -EINVAL; 3461 return -EINVAL;
3419 } 3462 }
3420 return r100_cs_track_texture_check(rdev, track); 3463
3464 if (track->tex_dirty) {
3465 track->tex_dirty = false;
3466 return r100_cs_track_texture_check(rdev, track);
3467 }
3468 return 0;
3421} 3469}
3422 3470
3423void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) 3471void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
3424{ 3472{
3425 unsigned i, face; 3473 unsigned i, face;
3426 3474
3475 track->cb_dirty = true;
3476 track->zb_dirty = true;
3477 track->tex_dirty = true;
3478 track->aa_dirty = true;
3479
3427 if (rdev->family < CHIP_R300) { 3480 if (rdev->family < CHIP_R300) {
3428 track->num_cb = 1; 3481 track->num_cb = 1;
3429 if (rdev->family <= CHIP_RS200) 3482 if (rdev->family <= CHIP_RS200)
@@ -3437,6 +3490,8 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
3437 track->num_texture = 16; 3490 track->num_texture = 16;
3438 track->maxy = 4096; 3491 track->maxy = 4096;
3439 track->separate_cube = 0; 3492 track->separate_cube = 0;
3493 track->aaresolve = true;
3494 track->aa.robj = NULL;
3440 } 3495 }
3441 3496
3442 for (i = 0; i < track->num_cb; i++) { 3497 for (i = 0; i < track->num_cb; i++) {
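The r100 hunks above introduce per-group dirty flags (cb_dirty, zb_dirty, tex_dirty, aa_dirty): every tracked register write in r100_packet0_check() marks the group it touches, r100_cs_track_check() re-validates only the groups that are marked and clears the flag afterwards, and r100_cs_track_clear() starts each command stream with everything dirty. A minimal sketch of that control flow, with simplified types and hypothetical validate_cb()/validate_zb() helpers rather than the driver's real checks:

#include <stdbool.h>

struct cs_track {
	bool cb_dirty;
	bool zb_dirty;
};

static int validate_cb(struct cs_track *t) { (void)t; return 0; }	/* placeholder */
static int validate_zb(struct cs_track *t) { (void)t; return 0; }	/* placeholder */

static int cs_track_check(struct cs_track *t)
{
	int r;

	if (t->cb_dirty) {		/* only re-check what changed */
		r = validate_cb(t);
		if (r)
			return r;
		t->cb_dirty = false;
	}
	if (t->zb_dirty) {
		r = validate_zb(t);
		if (r)
			return r;
		t->zb_dirty = false;
	}
	return 0;
}

static void cs_track_clear(struct cs_track *t)
{
	/* a fresh command stream must validate every group once */
	t->cb_dirty = true;
	t->zb_dirty = true;
}

The payoff is that back-to-back draws which only touch, say, texture state no longer re-run the color/depth buffer checks for every packet.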
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index af65600e6564..2fef9de7f363 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -52,14 +52,7 @@ struct r100_cs_track_texture {
52 unsigned compress_format; 52 unsigned compress_format;
53}; 53};
54 54
55struct r100_cs_track_limits {
56 unsigned num_cb;
57 unsigned num_texture;
58 unsigned max_levels;
59};
60
61struct r100_cs_track { 55struct r100_cs_track {
62 struct radeon_device *rdev;
63 unsigned num_cb; 56 unsigned num_cb;
64 unsigned num_texture; 57 unsigned num_texture;
65 unsigned maxy; 58 unsigned maxy;
@@ -73,11 +66,17 @@ struct r100_cs_track {
73 struct r100_cs_track_array arrays[11]; 66 struct r100_cs_track_array arrays[11];
74 struct r100_cs_track_cb cb[R300_MAX_CB]; 67 struct r100_cs_track_cb cb[R300_MAX_CB];
75 struct r100_cs_track_cb zb; 68 struct r100_cs_track_cb zb;
69 struct r100_cs_track_cb aa;
76 struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; 70 struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
77 bool z_enabled; 71 bool z_enabled;
78 bool separate_cube; 72 bool separate_cube;
79 bool zb_cb_clear; 73 bool zb_cb_clear;
80 bool blend_read_enable; 74 bool blend_read_enable;
75 bool cb_dirty;
76 bool zb_dirty;
77 bool tex_dirty;
78 bool aa_dirty;
79 bool aaresolve;
81}; 80};
82 81
83int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); 82int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index d2408c395619..f24058300413 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -184,6 +184,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
184 } 184 }
185 track->zb.robj = reloc->robj; 185 track->zb.robj = reloc->robj;
186 track->zb.offset = idx_value; 186 track->zb.offset = idx_value;
187 track->zb_dirty = true;
187 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 188 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
188 break; 189 break;
189 case RADEON_RB3D_COLOROFFSET: 190 case RADEON_RB3D_COLOROFFSET:
@@ -196,6 +197,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
196 } 197 }
197 track->cb[0].robj = reloc->robj; 198 track->cb[0].robj = reloc->robj;
198 track->cb[0].offset = idx_value; 199 track->cb[0].offset = idx_value;
200 track->cb_dirty = true;
199 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 201 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
200 break; 202 break;
201 case R200_PP_TXOFFSET_0: 203 case R200_PP_TXOFFSET_0:
@@ -214,6 +216,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
214 } 216 }
215 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 217 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
216 track->textures[i].robj = reloc->robj; 218 track->textures[i].robj = reloc->robj;
219 track->tex_dirty = true;
217 break; 220 break;
218 case R200_PP_CUBIC_OFFSET_F1_0: 221 case R200_PP_CUBIC_OFFSET_F1_0:
219 case R200_PP_CUBIC_OFFSET_F2_0: 222 case R200_PP_CUBIC_OFFSET_F2_0:
@@ -257,9 +260,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
257 track->textures[i].cube_info[face - 1].offset = idx_value; 260 track->textures[i].cube_info[face - 1].offset = idx_value;
258 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 261 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
259 track->textures[i].cube_info[face - 1].robj = reloc->robj; 262 track->textures[i].cube_info[face - 1].robj = reloc->robj;
263 track->tex_dirty = true;
260 break; 264 break;
261 case RADEON_RE_WIDTH_HEIGHT: 265 case RADEON_RE_WIDTH_HEIGHT:
262 track->maxy = ((idx_value >> 16) & 0x7FF); 266 track->maxy = ((idx_value >> 16) & 0x7FF);
267 track->cb_dirty = true;
268 track->zb_dirty = true;
263 break; 269 break;
264 case RADEON_RB3D_COLORPITCH: 270 case RADEON_RB3D_COLORPITCH:
265 r = r100_cs_packet_next_reloc(p, &reloc); 271 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -280,9 +286,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
280 ib[idx] = tmp; 286 ib[idx] = tmp;
281 287
282 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; 288 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
289 track->cb_dirty = true;
283 break; 290 break;
284 case RADEON_RB3D_DEPTHPITCH: 291 case RADEON_RB3D_DEPTHPITCH:
285 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; 292 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
293 track->zb_dirty = true;
286 break; 294 break;
287 case RADEON_RB3D_CNTL: 295 case RADEON_RB3D_CNTL:
288 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 296 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -312,6 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
312 } 320 }
313 321
314 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); 322 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
323 track->cb_dirty = true;
324 track->zb_dirty = true;
315 break; 325 break;
316 case RADEON_RB3D_ZSTENCILCNTL: 326 case RADEON_RB3D_ZSTENCILCNTL:
317 switch (idx_value & 0xf) { 327 switch (idx_value & 0xf) {
@@ -329,6 +339,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
329 default: 339 default:
330 break; 340 break;
331 } 341 }
342 track->zb_dirty = true;
332 break; 343 break;
333 case RADEON_RB3D_ZPASS_ADDR: 344 case RADEON_RB3D_ZPASS_ADDR:
334 r = r100_cs_packet_next_reloc(p, &reloc); 345 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -345,6 +356,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
345 uint32_t temp = idx_value >> 4; 356 uint32_t temp = idx_value >> 4;
346 for (i = 0; i < track->num_texture; i++) 357 for (i = 0; i < track->num_texture; i++)
347 track->textures[i].enabled = !!(temp & (1 << i)); 358 track->textures[i].enabled = !!(temp & (1 << i));
359 track->tex_dirty = true;
348 } 360 }
349 break; 361 break;
350 case RADEON_SE_VF_CNTL: 362 case RADEON_SE_VF_CNTL:
@@ -369,6 +381,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
369 i = (reg - R200_PP_TXSIZE_0) / 32; 381 i = (reg - R200_PP_TXSIZE_0) / 32;
370 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; 382 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
371 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 383 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
384 track->tex_dirty = true;
372 break; 385 break;
373 case R200_PP_TXPITCH_0: 386 case R200_PP_TXPITCH_0:
374 case R200_PP_TXPITCH_1: 387 case R200_PP_TXPITCH_1:
@@ -378,6 +391,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
378 case R200_PP_TXPITCH_5: 391 case R200_PP_TXPITCH_5:
379 i = (reg - R200_PP_TXPITCH_0) / 32; 392 i = (reg - R200_PP_TXPITCH_0) / 32;
380 track->textures[i].pitch = idx_value + 32; 393 track->textures[i].pitch = idx_value + 32;
394 track->tex_dirty = true;
381 break; 395 break;
382 case R200_PP_TXFILTER_0: 396 case R200_PP_TXFILTER_0:
383 case R200_PP_TXFILTER_1: 397 case R200_PP_TXFILTER_1:
@@ -394,6 +408,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
394 tmp = (idx_value >> 27) & 0x7; 408 tmp = (idx_value >> 27) & 0x7;
395 if (tmp == 2 || tmp == 6) 409 if (tmp == 2 || tmp == 6)
396 track->textures[i].roundup_h = false; 410 track->textures[i].roundup_h = false;
411 track->tex_dirty = true;
397 break; 412 break;
398 case R200_PP_TXMULTI_CTL_0: 413 case R200_PP_TXMULTI_CTL_0:
399 case R200_PP_TXMULTI_CTL_1: 414 case R200_PP_TXMULTI_CTL_1:
@@ -432,6 +447,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
432 track->textures[i].tex_coord_type = 1; 447 track->textures[i].tex_coord_type = 1;
433 break; 448 break;
434 } 449 }
450 track->tex_dirty = true;
435 break; 451 break;
436 case R200_PP_TXFORMAT_0: 452 case R200_PP_TXFORMAT_0:
437 case R200_PP_TXFORMAT_1: 453 case R200_PP_TXFORMAT_1:
@@ -488,6 +504,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
488 } 504 }
489 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 505 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
490 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); 506 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
507 track->tex_dirty = true;
491 break; 508 break;
492 case R200_PP_CUBIC_FACES_0: 509 case R200_PP_CUBIC_FACES_0:
493 case R200_PP_CUBIC_FACES_1: 510 case R200_PP_CUBIC_FACES_1:
@@ -501,6 +518,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
501 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 518 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
502 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); 519 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
503 } 520 }
521 track->tex_dirty = true;
504 break; 522 break;
505 default: 523 default:
506 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 524 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 55fe5ba7def3..069efa8c8ecf 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -667,6 +667,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
667 } 667 }
668 track->cb[i].robj = reloc->robj; 668 track->cb[i].robj = reloc->robj;
669 track->cb[i].offset = idx_value; 669 track->cb[i].offset = idx_value;
670 track->cb_dirty = true;
670 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 671 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
671 break; 672 break;
672 case R300_ZB_DEPTHOFFSET: 673 case R300_ZB_DEPTHOFFSET:
@@ -679,6 +680,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
679 } 680 }
680 track->zb.robj = reloc->robj; 681 track->zb.robj = reloc->robj;
681 track->zb.offset = idx_value; 682 track->zb.offset = idx_value;
683 track->zb_dirty = true;
682 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 684 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
683 break; 685 break;
684 case R300_TX_OFFSET_0: 686 case R300_TX_OFFSET_0:
@@ -717,6 +719,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
717 tmp |= tile_flags; 719 tmp |= tile_flags;
718 ib[idx] = tmp; 720 ib[idx] = tmp;
719 track->textures[i].robj = reloc->robj; 721 track->textures[i].robj = reloc->robj;
722 track->tex_dirty = true;
720 break; 723 break;
721 /* Tracked registers */ 724 /* Tracked registers */
722 case 0x2084: 725 case 0x2084:
@@ -743,6 +746,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
743 if (p->rdev->family < CHIP_RV515) { 746 if (p->rdev->family < CHIP_RV515) {
744 track->maxy -= 1440; 747 track->maxy -= 1440;
745 } 748 }
749 track->cb_dirty = true;
750 track->zb_dirty = true;
746 break; 751 break;
747 case 0x4E00: 752 case 0x4E00:
748 /* RB3D_CCTL */ 753 /* RB3D_CCTL */
@@ -752,6 +757,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
752 return -EINVAL; 757 return -EINVAL;
753 } 758 }
754 track->num_cb = ((idx_value >> 5) & 0x3) + 1; 759 track->num_cb = ((idx_value >> 5) & 0x3) + 1;
760 track->cb_dirty = true;
755 break; 761 break;
756 case 0x4E38: 762 case 0x4E38:
757 case 0x4E3C: 763 case 0x4E3C:
@@ -814,6 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
814 ((idx_value >> 21) & 0xF)); 820 ((idx_value >> 21) & 0xF));
815 return -EINVAL; 821 return -EINVAL;
816 } 822 }
823 track->cb_dirty = true;
817 break; 824 break;
818 case 0x4F00: 825 case 0x4F00:
819 /* ZB_CNTL */ 826 /* ZB_CNTL */
@@ -822,6 +829,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
822 } else { 829 } else {
823 track->z_enabled = false; 830 track->z_enabled = false;
824 } 831 }
832 track->zb_dirty = true;
825 break; 833 break;
826 case 0x4F10: 834 case 0x4F10:
827 /* ZB_FORMAT */ 835 /* ZB_FORMAT */
@@ -838,6 +846,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
838 (idx_value & 0xF)); 846 (idx_value & 0xF));
839 return -EINVAL; 847 return -EINVAL;
840 } 848 }
849 track->zb_dirty = true;
841 break; 850 break;
842 case 0x4F24: 851 case 0x4F24:
843 /* ZB_DEPTHPITCH */ 852 /* ZB_DEPTHPITCH */
@@ -861,14 +870,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
861 ib[idx] = tmp; 870 ib[idx] = tmp;
862 871
863 track->zb.pitch = idx_value & 0x3FFC; 872 track->zb.pitch = idx_value & 0x3FFC;
873 track->zb_dirty = true;
864 break; 874 break;
865 case 0x4104: 875 case 0x4104:
876 /* TX_ENABLE */
866 for (i = 0; i < 16; i++) { 877 for (i = 0; i < 16; i++) {
867 bool enabled; 878 bool enabled;
868 879
869 enabled = !!(idx_value & (1 << i)); 880 enabled = !!(idx_value & (1 << i));
870 track->textures[i].enabled = enabled; 881 track->textures[i].enabled = enabled;
871 } 882 }
883 track->tex_dirty = true;
872 break; 884 break;
873 case 0x44C0: 885 case 0x44C0:
874 case 0x44C4: 886 case 0x44C4:
@@ -898,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
898 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 910 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
899 break; 911 break;
900 case R300_TX_FORMAT_X16: 912 case R300_TX_FORMAT_X16:
913 case R300_TX_FORMAT_FL_I16:
901 case R300_TX_FORMAT_Y8X8: 914 case R300_TX_FORMAT_Y8X8:
902 case R300_TX_FORMAT_Z5Y6X5: 915 case R300_TX_FORMAT_Z5Y6X5:
903 case R300_TX_FORMAT_Z6Y5X5: 916 case R300_TX_FORMAT_Z6Y5X5:
@@ -910,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
910 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 923 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
911 break; 924 break;
912 case R300_TX_FORMAT_Y16X16: 925 case R300_TX_FORMAT_Y16X16:
926 case R300_TX_FORMAT_FL_I16A16:
913 case R300_TX_FORMAT_Z11Y11X10: 927 case R300_TX_FORMAT_Z11Y11X10:
914 case R300_TX_FORMAT_Z10Y11X11: 928 case R300_TX_FORMAT_Z10Y11X11:
915 case R300_TX_FORMAT_W8Z8Y8X8: 929 case R300_TX_FORMAT_W8Z8Y8X8:
@@ -951,8 +965,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
951 DRM_ERROR("Invalid texture format %u\n", 965 DRM_ERROR("Invalid texture format %u\n",
952 (idx_value & 0x1F)); 966 (idx_value & 0x1F));
953 return -EINVAL; 967 return -EINVAL;
954 break;
955 } 968 }
969 track->tex_dirty = true;
956 break; 970 break;
957 case 0x4400: 971 case 0x4400:
958 case 0x4404: 972 case 0x4404:
@@ -980,6 +994,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
980 if (tmp == 2 || tmp == 4 || tmp == 6) { 994 if (tmp == 2 || tmp == 4 || tmp == 6) {
981 track->textures[i].roundup_h = false; 995 track->textures[i].roundup_h = false;
982 } 996 }
997 track->tex_dirty = true;
983 break; 998 break;
984 case 0x4500: 999 case 0x4500:
985 case 0x4504: 1000 case 0x4504:
@@ -1017,6 +1032,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1017 DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); 1032 DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
1018 return -EINVAL; 1033 return -EINVAL;
1019 } 1034 }
1035 track->tex_dirty = true;
1020 break; 1036 break;
1021 case 0x4480: 1037 case 0x4480:
1022 case 0x4484: 1038 case 0x4484:
@@ -1046,6 +1062,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1046 track->textures[i].use_pitch = !!tmp; 1062 track->textures[i].use_pitch = !!tmp;
1047 tmp = (idx_value >> 22) & 0xF; 1063 tmp = (idx_value >> 22) & 0xF;
1048 track->textures[i].txdepth = tmp; 1064 track->textures[i].txdepth = tmp;
1065 track->tex_dirty = true;
1049 break; 1066 break;
1050 case R300_ZB_ZPASS_ADDR: 1067 case R300_ZB_ZPASS_ADDR:
1051 r = r100_cs_packet_next_reloc(p, &reloc); 1068 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1060,6 +1077,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1060 case 0x4e0c: 1077 case 0x4e0c:
1061 /* RB3D_COLOR_CHANNEL_MASK */ 1078 /* RB3D_COLOR_CHANNEL_MASK */
1062 track->color_channel_mask = idx_value; 1079 track->color_channel_mask = idx_value;
1080 track->cb_dirty = true;
1063 break; 1081 break;
1064 case 0x43a4: 1082 case 0x43a4:
1065 /* SC_HYPERZ_EN */ 1083 /* SC_HYPERZ_EN */
@@ -1073,6 +1091,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1073 case 0x4f1c: 1091 case 0x4f1c:
1074 /* ZB_BW_CNTL */ 1092 /* ZB_BW_CNTL */
1075 track->zb_cb_clear = !!(idx_value & (1 << 5)); 1093 track->zb_cb_clear = !!(idx_value & (1 << 5));
1094 track->cb_dirty = true;
1095 track->zb_dirty = true;
1076 if (p->rdev->hyperz_filp != p->filp) { 1096 if (p->rdev->hyperz_filp != p->filp) {
1077 if (idx_value & (R300_HIZ_ENABLE | 1097 if (idx_value & (R300_HIZ_ENABLE |
1078 R300_RD_COMP_ENABLE | 1098 R300_RD_COMP_ENABLE |
@@ -1084,8 +1104,28 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1084 case 0x4e04: 1104 case 0x4e04:
1085 /* RB3D_BLENDCNTL */ 1105 /* RB3D_BLENDCNTL */
1086 track->blend_read_enable = !!(idx_value & (1 << 2)); 1106 track->blend_read_enable = !!(idx_value & (1 << 2));
1107 track->cb_dirty = true;
1108 break;
1109 case R300_RB3D_AARESOLVE_OFFSET:
1110 r = r100_cs_packet_next_reloc(p, &reloc);
1111 if (r) {
1112 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1113 idx, reg);
1114 r100_cs_dump_packet(p, pkt);
1115 return r;
1116 }
1117 track->aa.robj = reloc->robj;
1118 track->aa.offset = idx_value;
1119 track->aa_dirty = true;
1120 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1121 break;
1122 case R300_RB3D_AARESOLVE_PITCH:
1123 track->aa.pitch = idx_value & 0x3FFE;
1124 track->aa_dirty = true;
1087 break; 1125 break;
1088 case 0x4f28: /* ZB_DEPTHCLEARVALUE */ 1126 case R300_RB3D_AARESOLVE_CTL:
1127 track->aaresolve = idx_value & 0x1;
1128 track->aa_dirty = true;
1089 break; 1129 break;
1090 case 0x4f30: /* ZB_MASK_OFFSET */ 1130 case 0x4f30: /* ZB_MASK_OFFSET */
1091 case 0x4f34: /* ZB_ZMASK_PITCH */ 1131 case 0x4f34: /* ZB_ZMASK_PITCH */
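The r300 parser above now records the AA resolve buffer object, offset and pitch from R300_RB3D_AARESOLVE_OFFSET/PITCH/CTL and flags aa_dirty; the matching size check added to r100_cs_track_check() bounds the buffer using color buffer 0's bytes-per-pixel, per the in-diff comment. A rough, self-contained sketch of that bound, with simplified field names:

#include <errno.h>

struct aa_state {
	unsigned int pitch;	/* pixels per row, from R300_RB3D_AARESOLVE_PITCH */
	unsigned int offset;	/* from R300_RB3D_AARESOLVE_OFFSET */
};

static int check_aa_resolve(const struct aa_state *aa, unsigned int cb0_cpp,
			    unsigned int maxy, unsigned long bo_size)
{
	unsigned long size;

	size = (unsigned long)aa->pitch * cb0_cpp * maxy;	/* resolve surface bytes */
	size += aa->offset;
	if (size > bo_size)
		return -EINVAL;		/* bound BO too small for the resolve target */
	return 0;
}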
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1a0d5362cd79..f0bce399c9f3 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -1371,6 +1371,8 @@
1371#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ 1371#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
1372#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ 1372#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
1373 1373
1374#define R300_RB3D_AARESOLVE_OFFSET 0x4E80
1375#define R300_RB3D_AARESOLVE_PITCH 0x4E84
1374#define R300_RB3D_AARESOLVE_CTL 0x4E88 1376#define R300_RB3D_AARESOLVE_CTL 0x4E88
1375/* gap */ 1377/* gap */
1376 1378
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 1e10e3e2ba2a..de88624d5f87 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,12 +97,16 @@ void r600_irq_disable(struct radeon_device *rdev);
97static void r600_pcie_gen2_enable(struct radeon_device *rdev); 97static void r600_pcie_gen2_enable(struct radeon_device *rdev);
98 98
99/* get temperature in millidegrees */ 99/* get temperature in millidegrees */
100u32 rv6xx_get_temp(struct radeon_device *rdev) 100int rv6xx_get_temp(struct radeon_device *rdev)
101{ 101{
102 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> 102 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
103 ASIC_T_SHIFT; 103 ASIC_T_SHIFT;
104 int actual_temp = temp & 0xff;
104 105
105 return temp * 1000; 106 if (temp & 0x100)
107 actual_temp -= 256;
108
109 return actual_temp * 1000;
106} 110}
107 111
108void r600_pm_get_dynpm_state(struct radeon_device *rdev) 112void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -2101,7 +2105,11 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
2101 2105
2102 r600_cp_stop(rdev); 2106 r600_cp_stop(rdev);
2103 2107
2104 WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); 2108 WREG32(CP_RB_CNTL,
2109#ifdef __BIG_ENDIAN
2110 BUF_SWAP_32BIT |
2111#endif
2112 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2105 2113
2106 /* Reset cp */ 2114 /* Reset cp */
2107 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 2115 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
@@ -2188,7 +2196,11 @@ int r600_cp_resume(struct radeon_device *rdev)
2188 WREG32(CP_RB_WPTR, 0); 2196 WREG32(CP_RB_WPTR, 0);
2189 2197
2190 /* set the wb address whether it's enabled or not */ 2198 /* set the wb address whether it's enabled or not */
2191 WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); 2199 WREG32(CP_RB_RPTR_ADDR,
2200#ifdef __BIG_ENDIAN
2201 RB_RPTR_SWAP(2) |
2202#endif
2203 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2192 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 2204 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2193 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 2205 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2194 2206
@@ -2624,7 +2636,11 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2624{ 2636{
2625 /* FIXME: implement */ 2637 /* FIXME: implement */
2626 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 2638 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2627 radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC); 2639 radeon_ring_write(rdev,
2640#ifdef __BIG_ENDIAN
2641 (2 << 0) |
2642#endif
2643 (ib->gpu_addr & 0xFFFFFFFC));
2628 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); 2644 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2629 radeon_ring_write(rdev, ib->length_dw); 2645 radeon_ring_write(rdev, ib->length_dw);
2630} 2646}
@@ -3293,8 +3309,8 @@ restart_ih:
3293 while (rptr != wptr) { 3309 while (rptr != wptr) {
3294 /* wptr/rptr are in bytes! */ 3310 /* wptr/rptr are in bytes! */
3295 ring_index = rptr / 4; 3311 ring_index = rptr / 4;
3296 src_id = rdev->ih.ring[ring_index] & 0xff; 3312 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3297 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; 3313 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3298 3314
3299 switch (src_id) { 3315 switch (src_id) {
3300 case 1: /* D1 vblank/vline */ 3316 case 1: /* D1 vblank/vline */
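rv6xx_get_temp() now returns a signed int because the ASIC_T field read from CG_THERMAL_STATUS is a 9-bit two's-complement value: bit 8 marks a negative reading, so the low byte is sign-extended by subtracting 256 before scaling to millidegrees. For example, a raw field of 0x1f6 gives a low byte of 246 with bit 8 set, so the reported temperature is (246 - 256) * 1000 = -10000 millidegrees C. A standalone sketch of that conversion (not the driver function itself):

static int asic_t_to_millicelsius(unsigned int asic_t)	/* 9-bit field */
{
	int temp = asic_t & 0xff;

	if (asic_t & 0x100)	/* bit 8 set: negative reading */
		temp -= 256;	/* e.g. 0x1f6 -> 246 - 256 = -10 */
	return temp * 1000;	/* millidegrees Celsius */
}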
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index ca5c29f70779..7f1043448d25 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -137,9 +137,9 @@ set_shaders(struct drm_device *dev)
137 ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); 137 ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
138 138
139 for (i = 0; i < r6xx_vs_size; i++) 139 for (i = 0; i < r6xx_vs_size; i++)
140 vs[i] = r6xx_vs[i]; 140 vs[i] = cpu_to_le32(r6xx_vs[i]);
141 for (i = 0; i < r6xx_ps_size; i++) 141 for (i = 0; i < r6xx_ps_size; i++)
142 ps[i] = r6xx_ps[i]; 142 ps[i] = cpu_to_le32(r6xx_ps[i]);
143 143
144 dev_priv->blit_vb->used = 512; 144 dev_priv->blit_vb->used = 512;
145 145
@@ -192,6 +192,9 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
192 DRM_DEBUG("\n"); 192 DRM_DEBUG("\n");
193 193
194 sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8)); 194 sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
195#ifdef __BIG_ENDIAN
196 sq_vtx_constant_word2 |= (2 << 30);
197#endif
195 198
196 BEGIN_RING(9); 199 BEGIN_RING(9);
197 OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); 200 OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
@@ -291,7 +294,11 @@ draw_auto(drm_radeon_private_t *dev_priv)
291 OUT_RING(DI_PT_RECTLIST); 294 OUT_RING(DI_PT_RECTLIST);
292 295
293 OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0)); 296 OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
297#ifdef __BIG_ENDIAN
298 OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
299#else
294 OUT_RING(DI_INDEX_SIZE_16_BIT); 300 OUT_RING(DI_INDEX_SIZE_16_BIT);
301#endif
295 302
296 OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0)); 303 OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
297 OUT_RING(1); 304 OUT_RING(1);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 86e5aa07f0db..41f7aafc97c4 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -54,7 +54,7 @@ set_render_target(struct radeon_device *rdev, int format,
54 if (h < 8) 54 if (h < 8)
55 h = 8; 55 h = 8;
56 56
57 cb_color_info = ((format << 2) | (1 << 27)); 57 cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
58 pitch = (w / 8) - 1; 58 pitch = (w / 8) - 1;
59 slice = ((w * h) / 64) - 1; 59 slice = ((w * h) / 64) - 1;
60 60
@@ -165,6 +165,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
165 u32 sq_vtx_constant_word2; 165 u32 sq_vtx_constant_word2;
166 166
167 sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); 167 sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
168#ifdef __BIG_ENDIAN
169 sq_vtx_constant_word2 |= (2 << 30);
170#endif
168 171
169 radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); 172 radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
170 radeon_ring_write(rdev, 0x460); 173 radeon_ring_write(rdev, 0x460);
@@ -199,7 +202,7 @@ set_tex_resource(struct radeon_device *rdev,
199 if (h < 1) 202 if (h < 1)
200 h = 1; 203 h = 1;
201 204
202 sq_tex_resource_word0 = (1 << 0); 205 sq_tex_resource_word0 = (1 << 0) | (1 << 3);
203 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | 206 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
204 ((w - 1) << 19)); 207 ((w - 1) << 19));
205 208
@@ -253,7 +256,11 @@ draw_auto(struct radeon_device *rdev)
253 radeon_ring_write(rdev, DI_PT_RECTLIST); 256 radeon_ring_write(rdev, DI_PT_RECTLIST);
254 257
255 radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); 258 radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
256 radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); 259 radeon_ring_write(rdev,
260#ifdef __BIG_ENDIAN
261 (2 << 2) |
262#endif
263 DI_INDEX_SIZE_16_BIT);
257 264
258 radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); 265 radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
259 radeon_ring_write(rdev, 1); 266 radeon_ring_write(rdev, 1);
@@ -424,7 +431,11 @@ set_default_state(struct radeon_device *rdev)
424 dwords = ALIGN(rdev->r600_blit.state_len, 0x10); 431 dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
425 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; 432 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
426 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 433 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
427 radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); 434 radeon_ring_write(rdev,
435#ifdef __BIG_ENDIAN
436 (2 << 0) |
437#endif
438 (gpu_addr & 0xFFFFFFFC));
428 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); 439 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
429 radeon_ring_write(rdev, dwords); 440 radeon_ring_write(rdev, dwords);
430 441
@@ -467,7 +478,7 @@ static inline uint32_t i2f(uint32_t input)
467int r600_blit_init(struct radeon_device *rdev) 478int r600_blit_init(struct radeon_device *rdev)
468{ 479{
469 u32 obj_size; 480 u32 obj_size;
470 int r, dwords; 481 int i, r, dwords;
471 void *ptr; 482 void *ptr;
472 u32 packet2s[16]; 483 u32 packet2s[16];
473 int num_packet2s = 0; 484 int num_packet2s = 0;
@@ -486,7 +497,7 @@ int r600_blit_init(struct radeon_device *rdev)
486 497
487 dwords = rdev->r600_blit.state_len; 498 dwords = rdev->r600_blit.state_len;
488 while (dwords & 0xf) { 499 while (dwords & 0xf) {
489 packet2s[num_packet2s++] = PACKET2(0); 500 packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
490 dwords++; 501 dwords++;
491 } 502 }
492 503
@@ -529,8 +540,10 @@ int r600_blit_init(struct radeon_device *rdev)
529 if (num_packet2s) 540 if (num_packet2s)
530 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), 541 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
531 packet2s, num_packet2s * 4); 542 packet2s, num_packet2s * 4);
532 memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); 543 for (i = 0; i < r6xx_vs_size; i++)
533 memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); 544 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
545 for (i = 0; i < r6xx_ps_size; i++)
546 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
534 radeon_bo_kunmap(rdev->r600_blit.shader_obj); 547 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
535 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 548 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
536 549
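On big-endian hosts the blit shaders above are no longer memcpy()'d into the GPU-visible buffer as raw host-order words; each 32-bit instruction is converted to the little-endian layout the GPU expects, one dword at a time (cpu_to_le32() in the kernel paths). A small userspace-flavoured sketch of the same copy, using glibc's htole32() as a stand-in for cpu_to_le32():

#include <endian.h>
#include <stdint.h>
#include <stddef.h>

static void copy_shader_le32(uint32_t *dst, const uint32_t *src, size_t ndwords)
{
	size_t i;

	for (i = 0; i < ndwords; i++)
		dst[i] = htole32(src[i]);	/* no-op on little-endian hosts */
}

On little-endian machines this compiles down to the plain copy it replaces, which is why the change is invisible there.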
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index e8151c1d55b2..2d1f6c5ee2a7 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -684,7 +684,11 @@ const u32 r6xx_vs[] =
684 0x00000000, 684 0x00000000,
685 0x3c000000, 685 0x3c000000,
686 0x68cd1000, 686 0x68cd1000,
687#ifdef __BIG_ENDIAN
688 0x000a0000,
689#else
687 0x00080000, 690 0x00080000,
691#endif
688 0x00000000, 692 0x00000000,
689}; 693};
690 694
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 4f4cd8b286d5..c3ab959bdc7c 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -396,6 +396,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
396 r600_do_cp_stop(dev_priv); 396 r600_do_cp_stop(dev_priv);
397 397
398 RADEON_WRITE(R600_CP_RB_CNTL, 398 RADEON_WRITE(R600_CP_RB_CNTL,
399#ifdef __BIG_ENDIAN
400 R600_BUF_SWAP_32BIT |
401#endif
399 R600_RB_NO_UPDATE | 402 R600_RB_NO_UPDATE |
400 R600_RB_BLKSZ(15) | 403 R600_RB_BLKSZ(15) |
401 R600_RB_BUFSZ(3)); 404 R600_RB_BUFSZ(3));
@@ -486,9 +489,12 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
486 r600_do_cp_stop(dev_priv); 489 r600_do_cp_stop(dev_priv);
487 490
488 RADEON_WRITE(R600_CP_RB_CNTL, 491 RADEON_WRITE(R600_CP_RB_CNTL,
492#ifdef __BIG_ENDIAN
493 R600_BUF_SWAP_32BIT |
494#endif
489 R600_RB_NO_UPDATE | 495 R600_RB_NO_UPDATE |
490 (15 << 8) | 496 R600_RB_BLKSZ(15) |
491 (3 << 0)); 497 R600_RB_BUFSZ(3));
492 498
493 RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); 499 RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
494 RADEON_READ(R600_GRBM_SOFT_RESET); 500 RADEON_READ(R600_GRBM_SOFT_RESET);
@@ -550,8 +556,12 @@ static void r600_test_writeback(drm_radeon_private_t *dev_priv)
550 556
551 if (!dev_priv->writeback_works) { 557 if (!dev_priv->writeback_works) {
552 /* Disable writeback to avoid unnecessary bus master transfer */ 558 /* Disable writeback to avoid unnecessary bus master transfer */
553 RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) | 559 RADEON_WRITE(R600_CP_RB_CNTL,
554 RADEON_RB_NO_UPDATE); 560#ifdef __BIG_ENDIAN
561 R600_BUF_SWAP_32BIT |
562#endif
563 RADEON_READ(R600_CP_RB_CNTL) |
564 R600_RB_NO_UPDATE);
555 RADEON_WRITE(R600_SCRATCH_UMSK, 0); 565 RADEON_WRITE(R600_SCRATCH_UMSK, 0);
556 } 566 }
557} 567}
@@ -575,7 +585,11 @@ int r600_do_engine_reset(struct drm_device *dev)
575 585
576 RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0); 586 RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
577 cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL); 587 cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
578 RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA); 588 RADEON_WRITE(R600_CP_RB_CNTL,
589#ifdef __BIG_ENDIAN
590 R600_BUF_SWAP_32BIT |
591#endif
592 R600_RB_RPTR_WR_ENA);
579 593
580 RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr); 594 RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
581 RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr); 595 RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
@@ -1838,7 +1852,10 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
1838 + dev_priv->gart_vm_start; 1852 + dev_priv->gart_vm_start;
1839 } 1853 }
1840 RADEON_WRITE(R600_CP_RB_RPTR_ADDR, 1854 RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
1841 rptr_addr & 0xffffffff); 1855#ifdef __BIG_ENDIAN
1856 (2 << 0) |
1857#endif
1858 (rptr_addr & 0xfffffffc));
1842 RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, 1859 RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
1843 upper_32_bits(rptr_addr)); 1860 upper_32_bits(rptr_addr));
1844 1861
@@ -1889,7 +1906,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
1889 { 1906 {
1890 u64 scratch_addr; 1907 u64 scratch_addr;
1891 1908
1892 scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR); 1909 scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
1893 scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32; 1910 scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
1894 scratch_addr += R600_SCRATCH_REG_OFFSET; 1911 scratch_addr += R600_SCRATCH_REG_OFFSET;
1895 scratch_addr >>= 8; 1912 scratch_addr >>= 8;
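Both the KMS path (r600.c) and the legacy path above fold a byte-swap flag into CP_RB_CNTL on big-endian kernels so the CP swaps ring-buffer dwords as it fetches them, instead of the driver swizzling every write. A sketch of how the register value is assembled, using the bit definitions that appear in r600d.h further down; the actual register write (WREG32()/RADEON_WRITE()) is left out:

#define RB_BUFSZ(x)	((x) << 0)
#define RB_BLKSZ(x)	((x) << 8)
#define BUF_SWAP_32BIT	(2 << 16)
#define RB_NO_UPDATE	(1 << 27)

static unsigned int rb_cntl_value(void)
{
	unsigned int val = RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3);

#ifdef __BIG_ENDIAN
	val |= BUF_SWAP_32BIT;	/* CP byte-swaps ring dwords on fetch */
#endif
	return val;
}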
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 7831e0890210..153095fba62f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -295,17 +295,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
295 } 295 }
296 296
297 if (!IS_ALIGNED(pitch, pitch_align)) { 297 if (!IS_ALIGNED(pitch, pitch_align)) {
298 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", 298 dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
299 __func__, __LINE__, pitch); 299 __func__, __LINE__, pitch, pitch_align, array_mode);
300 return -EINVAL; 300 return -EINVAL;
301 } 301 }
302 if (!IS_ALIGNED(height, height_align)) { 302 if (!IS_ALIGNED(height, height_align)) {
303 dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", 303 dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
304 __func__, __LINE__, height); 304 __func__, __LINE__, height, height_align, array_mode);
305 return -EINVAL; 305 return -EINVAL;
306 } 306 }
307 if (!IS_ALIGNED(base_offset, base_align)) { 307 if (!IS_ALIGNED(base_offset, base_align)) {
308 dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); 308 dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
309 base_offset, base_align, array_mode);
309 return -EINVAL; 310 return -EINVAL;
310 } 311 }
311 312
@@ -320,7 +321,10 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
320 * broken userspace. 321 * broken userspace.
321 */ 322 */
322 } else { 323 } else {
323 dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); 324 dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
325 array_mode,
326 track->cb_color_bo_offset[i], tmp,
327 radeon_bo_size(track->cb_color_bo[i]));
324 return -EINVAL; 328 return -EINVAL;
325 } 329 }
326 } 330 }
@@ -455,17 +459,18 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
455 } 459 }
456 460
457 if (!IS_ALIGNED(pitch, pitch_align)) { 461 if (!IS_ALIGNED(pitch, pitch_align)) {
458 dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", 462 dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
459 __func__, __LINE__, pitch); 463 __func__, __LINE__, pitch, pitch_align, array_mode);
460 return -EINVAL; 464 return -EINVAL;
461 } 465 }
462 if (!IS_ALIGNED(height, height_align)) { 466 if (!IS_ALIGNED(height, height_align)) {
463 dev_warn(p->dev, "%s:%d db height (%d) invalid\n", 467 dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
464 __func__, __LINE__, height); 468 __func__, __LINE__, height, height_align, array_mode);
465 return -EINVAL; 469 return -EINVAL;
466 } 470 }
467 if (!IS_ALIGNED(base_offset, base_align)) { 471 if (!IS_ALIGNED(base_offset, base_align)) {
468 dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); 472 dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
473 base_offset, base_align, array_mode);
469 return -EINVAL; 474 return -EINVAL;
470 } 475 }
471 476
@@ -473,9 +478,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
473 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 478 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
474 tmp = ntiles * bpe * 64 * nviews; 479 tmp = ntiles * bpe * 64 * nviews;
475 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 480 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
476 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n", 481 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
477 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, 482 array_mode,
478 radeon_bo_size(track->db_bo)); 483 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
484 radeon_bo_size(track->db_bo));
479 return -EINVAL; 485 return -EINVAL;
480 } 486 }
481 } 487 }
@@ -1227,18 +1233,18 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
1227 /* XXX check height as well... */ 1233 /* XXX check height as well... */
1228 1234
1229 if (!IS_ALIGNED(pitch, pitch_align)) { 1235 if (!IS_ALIGNED(pitch, pitch_align)) {
1230 dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", 1236 dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
1231 __func__, __LINE__, pitch); 1237 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
1232 return -EINVAL; 1238 return -EINVAL;
1233 } 1239 }
1234 if (!IS_ALIGNED(base_offset, base_align)) { 1240 if (!IS_ALIGNED(base_offset, base_align)) {
1235 dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n", 1241 dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
1236 __func__, __LINE__, base_offset); 1242 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
1237 return -EINVAL; 1243 return -EINVAL;
1238 } 1244 }
1239 if (!IS_ALIGNED(mip_offset, base_align)) { 1245 if (!IS_ALIGNED(mip_offset, base_align)) {
1240 dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n", 1246 dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
1241 __func__, __LINE__, mip_offset); 1247 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
1242 return -EINVAL; 1248 return -EINVAL;
1243 } 1249 }
1244 1250
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index a5d898b4bad2..04bac0bbd3ec 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -154,13 +154,14 @@
154#define ROQ_IB2_START(x) ((x) << 8) 154#define ROQ_IB2_START(x) ((x) << 8)
155#define CP_RB_BASE 0xC100 155#define CP_RB_BASE 0xC100
156#define CP_RB_CNTL 0xC104 156#define CP_RB_CNTL 0xC104
157#define RB_BUFSZ(x) ((x)<<0) 157#define RB_BUFSZ(x) ((x) << 0)
158#define RB_BLKSZ(x) ((x)<<8) 158#define RB_BLKSZ(x) ((x) << 8)
159#define RB_NO_UPDATE (1<<27) 159#define RB_NO_UPDATE (1 << 27)
160#define RB_RPTR_WR_ENA (1<<31) 160#define RB_RPTR_WR_ENA (1 << 31)
161#define BUF_SWAP_32BIT (2 << 16) 161#define BUF_SWAP_32BIT (2 << 16)
162#define CP_RB_RPTR 0x8700 162#define CP_RB_RPTR 0x8700
163#define CP_RB_RPTR_ADDR 0xC10C 163#define CP_RB_RPTR_ADDR 0xC10C
164#define RB_RPTR_SWAP(x) ((x) << 0)
164#define CP_RB_RPTR_ADDR_HI 0xC110 165#define CP_RB_RPTR_ADDR_HI 0xC110
165#define CP_RB_RPTR_WR 0xC108 166#define CP_RB_RPTR_WR 0xC108
166#define CP_RB_WPTR 0xC114 167#define CP_RB_WPTR 0xC114
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 71d2a554bbe6..56c48b67ef3d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -179,10 +179,10 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev);
179void radeon_atombios_get_power_modes(struct radeon_device *rdev); 179void radeon_atombios_get_power_modes(struct radeon_device *rdev);
180void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); 180void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
181void rs690_pm_info(struct radeon_device *rdev); 181void rs690_pm_info(struct radeon_device *rdev);
182extern u32 rv6xx_get_temp(struct radeon_device *rdev); 182extern int rv6xx_get_temp(struct radeon_device *rdev);
183extern u32 rv770_get_temp(struct radeon_device *rdev); 183extern int rv770_get_temp(struct radeon_device *rdev);
184extern u32 evergreen_get_temp(struct radeon_device *rdev); 184extern int evergreen_get_temp(struct radeon_device *rdev);
185extern u32 sumo_get_temp(struct radeon_device *rdev); 185extern int sumo_get_temp(struct radeon_device *rdev);
186 186
187/* 187/*
188 * Fences. 188 * Fences.
@@ -812,8 +812,7 @@ struct radeon_pm {
812 fixed20_12 sclk; 812 fixed20_12 sclk;
813 fixed20_12 mclk; 813 fixed20_12 mclk;
814 fixed20_12 needed_bandwidth; 814 fixed20_12 needed_bandwidth;
815 /* XXX: use a define for num power modes */ 815 struct radeon_power_state *power_state;
816 struct radeon_power_state power_state[8];
817 /* number of valid power states */ 816 /* number of valid power states */
818 int num_power_states; 817 int num_power_states;
819 int current_power_state_index; 818 int current_power_state_index;
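radeon_pm now keeps a power_state pointer instead of the fixed power_state[8] array, and the AtomBIOS power-table parsers further down kzalloc() exactly as many entries as the table reports, returning the state count accumulated so far if the allocation fails. A sketch of that allocate-or-bail pattern, with a stubbed state type and calloc() standing in for kzalloc():

#include <stdlib.h>

struct power_state { int placeholder; };

static int parse_power_table(struct power_state **out, unsigned int num_modes)
{
	int state_index = 0;
	struct power_state *states;

	states = calloc(num_modes, sizeof(*states));	/* kzalloc() analogue */
	if (!states)
		return state_index;	/* nothing parsed, caller sees 0 states */

	/* ... fill states[state_index++] from each table entry ... */

	*out = states;
	return state_index;
}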
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 3a1b16186224..e75d63b8e21d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -759,7 +759,7 @@ static struct radeon_asic evergreen_asic = {
759 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, 759 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
760 .gart_set_page = &rs600_gart_set_page, 760 .gart_set_page = &rs600_gart_set_page,
761 .ring_test = &r600_ring_test, 761 .ring_test = &r600_ring_test,
762 .ring_ib_execute = &r600_ring_ib_execute, 762 .ring_ib_execute = &evergreen_ring_ib_execute,
763 .irq_set = &evergreen_irq_set, 763 .irq_set = &evergreen_irq_set,
764 .irq_process = &evergreen_irq_process, 764 .irq_process = &evergreen_irq_process,
765 .get_vblank_counter = &evergreen_get_vblank_counter, 765 .get_vblank_counter = &evergreen_get_vblank_counter,
@@ -805,7 +805,7 @@ static struct radeon_asic sumo_asic = {
805 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, 805 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
806 .gart_set_page = &rs600_gart_set_page, 806 .gart_set_page = &rs600_gart_set_page,
807 .ring_test = &r600_ring_test, 807 .ring_test = &r600_ring_test,
808 .ring_ib_execute = &r600_ring_ib_execute, 808 .ring_ib_execute = &evergreen_ring_ib_execute,
809 .irq_set = &evergreen_irq_set, 809 .irq_set = &evergreen_irq_set,
810 .irq_process = &evergreen_irq_process, 810 .irq_process = &evergreen_irq_process,
811 .get_vblank_counter = &evergreen_get_vblank_counter, 811 .get_vblank_counter = &evergreen_get_vblank_counter,
@@ -848,7 +848,7 @@ static struct radeon_asic btc_asic = {
848 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, 848 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
849 .gart_set_page = &rs600_gart_set_page, 849 .gart_set_page = &rs600_gart_set_page,
850 .ring_test = &r600_ring_test, 850 .ring_test = &r600_ring_test,
851 .ring_ib_execute = &r600_ring_ib_execute, 851 .ring_ib_execute = &evergreen_ring_ib_execute,
852 .irq_set = &evergreen_irq_set, 852 .irq_set = &evergreen_irq_set,
853 .irq_process = &evergreen_irq_process, 853 .irq_process = &evergreen_irq_process,
854 .get_vblank_counter = &evergreen_get_vblank_counter, 854 .get_vblank_counter = &evergreen_get_vblank_counter,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e01f07718539..c59bd98a2029 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -355,6 +355,7 @@ int evergreen_resume(struct radeon_device *rdev);
355bool evergreen_gpu_is_lockup(struct radeon_device *rdev); 355bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
356int evergreen_asic_reset(struct radeon_device *rdev); 356int evergreen_asic_reset(struct radeon_device *rdev);
357void evergreen_bandwidth_update(struct radeon_device *rdev); 357void evergreen_bandwidth_update(struct radeon_device *rdev);
358void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
358int evergreen_copy_blit(struct radeon_device *rdev, 359int evergreen_copy_blit(struct radeon_device *rdev,
359 uint64_t src_offset, uint64_t dst_offset, 360 uint64_t src_offset, uint64_t dst_offset,
360 unsigned num_pages, struct radeon_fence *fence); 361 unsigned num_pages, struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 52777902bbcc..02d5c415f499 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -88,7 +88,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
88 /* some evergreen boards have bad data for this entry */ 88 /* some evergreen boards have bad data for this entry */
89 if (ASIC_IS_DCE4(rdev)) { 89 if (ASIC_IS_DCE4(rdev)) {
90 if ((i == 7) && 90 if ((i == 7) &&
91 (gpio->usClkMaskRegisterIndex == 0x1936) && 91 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
92 (gpio->sucI2cId.ucAccess == 0)) { 92 (gpio->sucI2cId.ucAccess == 0)) {
93 gpio->sucI2cId.ucAccess = 0x97; 93 gpio->sucI2cId.ucAccess = 0x97;
94 gpio->ucDataMaskShift = 8; 94 gpio->ucDataMaskShift = 8;
@@ -101,7 +101,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
101 /* some DCE3 boards have bad data for this entry */ 101 /* some DCE3 boards have bad data for this entry */
102 if (ASIC_IS_DCE3(rdev)) { 102 if (ASIC_IS_DCE3(rdev)) {
103 if ((i == 4) && 103 if ((i == 4) &&
104 (gpio->usClkMaskRegisterIndex == 0x1fda) && 104 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
105 (gpio->sucI2cId.ucAccess == 0x94)) 105 (gpio->sucI2cId.ucAccess == 0x94))
106 gpio->sucI2cId.ucAccess = 0x14; 106 gpio->sucI2cId.ucAccess = 0x14;
107 } 107 }
@@ -172,7 +172,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
172 /* some evergreen boards have bad data for this entry */ 172 /* some evergreen boards have bad data for this entry */
173 if (ASIC_IS_DCE4(rdev)) { 173 if (ASIC_IS_DCE4(rdev)) {
174 if ((i == 7) && 174 if ((i == 7) &&
175 (gpio->usClkMaskRegisterIndex == 0x1936) && 175 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
176 (gpio->sucI2cId.ucAccess == 0)) { 176 (gpio->sucI2cId.ucAccess == 0)) {
177 gpio->sucI2cId.ucAccess = 0x97; 177 gpio->sucI2cId.ucAccess = 0x97;
178 gpio->ucDataMaskShift = 8; 178 gpio->ucDataMaskShift = 8;
@@ -185,7 +185,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
185 /* some DCE3 boards have bad data for this entry */ 185 /* some DCE3 boards have bad data for this entry */
186 if (ASIC_IS_DCE3(rdev)) { 186 if (ASIC_IS_DCE3(rdev)) {
187 if ((i == 4) && 187 if ((i == 4) &&
188 (gpio->usClkMaskRegisterIndex == 0x1fda) && 188 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
189 (gpio->sucI2cId.ucAccess == 0x94)) 189 (gpio->sucI2cId.ucAccess == 0x94))
190 gpio->sucI2cId.ucAccess = 0x14; 190 gpio->sucI2cId.ucAccess = 0x14;
191 } 191 }
@@ -252,7 +252,7 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd
252 pin = &gpio_info->asGPIO_Pin[i]; 252 pin = &gpio_info->asGPIO_Pin[i];
253 if (id == pin->ucGPIO_ID) { 253 if (id == pin->ucGPIO_ID) {
254 gpio.id = pin->ucGPIO_ID; 254 gpio.id = pin->ucGPIO_ID;
255 gpio.reg = pin->usGpioPin_AIndex * 4; 255 gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
256 gpio.mask = (1 << pin->ucGpioPinBitShift); 256 gpio.mask = (1 << pin->ucGpioPinBitShift);
257 gpio.valid = true; 257 gpio.valid = true;
258 break; 258 break;
@@ -1163,16 +1163,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1163 p1pll->pll_out_min = 64800; 1163 p1pll->pll_out_min = 64800;
1164 else 1164 else
1165 p1pll->pll_out_min = 20000; 1165 p1pll->pll_out_min = 20000;
1166 } else if (p1pll->pll_out_min > 64800) {
1167 /* Limiting the pll output range is a good thing generally as
1168 * it limits the number of possible pll combinations for a given
1169 * frequency presumably to the ones that work best on each card.
1170 * However, certain duallink DVI monitors seem to like
1171 * pll combinations that would be limited by this at least on
1172 * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
1173 * family.
1174 */
1175 p1pll->pll_out_min = 64800;
1176 } 1166 }
1177 1167
1178 p1pll->pll_in_min = 1168 p1pll->pll_in_min =
@@ -1284,11 +1274,11 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
1284 data_offset); 1274 data_offset);
1285 switch (crev) { 1275 switch (crev) {
1286 case 1: 1276 case 1:
1287 if (igp_info->info.ulBootUpMemoryClock) 1277 if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
1288 return true; 1278 return true;
1289 break; 1279 break;
1290 case 2: 1280 case 2:
1291 if (igp_info->info_2.ulBootUpSidePortClock) 1281 if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
1292 return true; 1282 return true;
1293 break; 1283 break;
1294 default: 1284 default:
@@ -1452,7 +1442,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1452 1442
1453 for (i = 0; i < num_indices; i++) { 1443 for (i = 0; i < num_indices; i++) {
1454 if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && 1444 if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
1455 (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) { 1445 (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
1456 ss->percentage = 1446 ss->percentage =
1457 le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1447 le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
1458 ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1448 ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1466,7 +1456,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1466 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); 1456 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
1467 for (i = 0; i < num_indices; i++) { 1457 for (i = 0; i < num_indices; i++) {
1468 if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && 1458 if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
1469 (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) { 1459 (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
1470 ss->percentage = 1460 ss->percentage =
1471 le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1461 le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
1472 ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1462 ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1480,7 +1470,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1480 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); 1470 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
1481 for (i = 0; i < num_indices; i++) { 1471 for (i = 0; i < num_indices; i++) {
1482 if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && 1472 if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
1483 (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) { 1473 (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
1484 ss->percentage = 1474 ss->percentage =
1485 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1475 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
1486 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1476 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1563,8 +1553,8 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1563 if (misc & ATOM_DOUBLE_CLOCK_MODE) 1553 if (misc & ATOM_DOUBLE_CLOCK_MODE)
1564 lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; 1554 lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
1565 1555
1566 lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize; 1556 lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
1567 lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize; 1557 lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
1568 1558
1569 /* set crtc values */ 1559 /* set crtc values */
1570 drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); 1560 drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
@@ -1579,13 +1569,13 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1579 lvds->linkb = false; 1569 lvds->linkb = false;
1580 1570
1581 /* parse the lcd record table */ 1571 /* parse the lcd record table */
1582 if (lvds_info->info.usModePatchTableOffset) { 1572 if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
1583 ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; 1573 ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
1584 ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; 1574 ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
1585 bool bad_record = false; 1575 bool bad_record = false;
1586 u8 *record = (u8 *)(mode_info->atom_context->bios + 1576 u8 *record = (u8 *)(mode_info->atom_context->bios +
1587 data_offset + 1577 data_offset +
1588 lvds_info->info.usModePatchTableOffset); 1578 le16_to_cpu(lvds_info->info.usModePatchTableOffset));
1589 while (*record != ATOM_RECORD_END_TYPE) { 1579 while (*record != ATOM_RECORD_END_TYPE) {
1590 switch (*record) { 1580 switch (*record) {
1591 case LCD_MODE_PATCH_RECORD_MODE_TYPE: 1581 case LCD_MODE_PATCH_RECORD_MODE_TYPE:
@@ -1987,6 +1977,9 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
1987 num_modes = power_info->info.ucNumOfPowerModeEntries; 1977 num_modes = power_info->info.ucNumOfPowerModeEntries;
1988 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) 1978 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
1989 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; 1979 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
1980 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
1981 if (!rdev->pm.power_state)
1982 return state_index;
1990 /* last mode is usually default, array is low to high */ 1983 /* last mode is usually default, array is low to high */
1991 for (i = 0; i < num_modes; i++) { 1984 for (i = 0; i < num_modes; i++) {
1992 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 1985 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
@@ -2196,7 +2189,7 @@ static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
2196 firmware_info = 2189 firmware_info =
2197 (union firmware_info *)(mode_info->atom_context->bios + 2190 (union firmware_info *)(mode_info->atom_context->bios +
2198 data_offset); 2191 data_offset);
2199 vddc = firmware_info->info_14.usBootUpVDDCVoltage; 2192 vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
2200 } 2193 }
2201 2194
2202 return vddc; 2195 return vddc;
@@ -2291,7 +2284,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2291 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = 2284 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
2292 VOLTAGE_SW; 2285 VOLTAGE_SW;
2293 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = 2286 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
2294 clock_info->evergreen.usVDDC; 2287 le16_to_cpu(clock_info->evergreen.usVDDC);
2295 } else { 2288 } else {
2296 sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); 2289 sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
2297 sclk |= clock_info->r600.ucEngineClockHigh << 16; 2290 sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2302,7 +2295,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2302 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = 2295 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
2303 VOLTAGE_SW; 2296 VOLTAGE_SW;
2304 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = 2297 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
2305 clock_info->r600.usVDDC; 2298 le16_to_cpu(clock_info->r600.usVDDC);
2306 } 2299 }
2307 2300
2308 if (rdev->flags & RADEON_IS_IGP) { 2301 if (rdev->flags & RADEON_IS_IGP) {
@@ -2338,6 +2331,10 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
2338 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2331 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2339 2332
2340 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); 2333 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
2334 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2335 power_info->pplib.ucNumStates, GFP_KERNEL);
2336 if (!rdev->pm.power_state)
2337 return state_index;
2341 /* first mode is usually default, followed by low to high */ 2338 /* first mode is usually default, followed by low to high */
2342 for (i = 0; i < power_info->pplib.ucNumStates; i++) { 2339 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
2343 mode_index = 0; 2340 mode_index = 0;
@@ -2411,13 +2408,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2411 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); 2408 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
2412 state_array = (struct StateArray *) 2409 state_array = (struct StateArray *)
2413 (mode_info->atom_context->bios + data_offset + 2410 (mode_info->atom_context->bios + data_offset +
2414 power_info->pplib.usStateArrayOffset); 2411 le16_to_cpu(power_info->pplib.usStateArrayOffset));
2415 clock_info_array = (struct ClockInfoArray *) 2412 clock_info_array = (struct ClockInfoArray *)
2416 (mode_info->atom_context->bios + data_offset + 2413 (mode_info->atom_context->bios + data_offset +
2417 power_info->pplib.usClockInfoArrayOffset); 2414 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
2418 non_clock_info_array = (struct NonClockInfoArray *) 2415 non_clock_info_array = (struct NonClockInfoArray *)
2419 (mode_info->atom_context->bios + data_offset + 2416 (mode_info->atom_context->bios + data_offset +
2420 power_info->pplib.usNonClockInfoArrayOffset); 2417 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2418 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2419 state_array->ucNumEntries, GFP_KERNEL);
2420 if (!rdev->pm.power_state)
2421 return state_index;
2421 for (i = 0; i < state_array->ucNumEntries; i++) { 2422 for (i = 0; i < state_array->ucNumEntries; i++) {
2422 mode_index = 0; 2423 mode_index = 0;
2423 power_state = (union pplib_power_state *)&state_array->states[i]; 2424 power_state = (union pplib_power_state *)&state_array->states[i];
@@ -2491,19 +2492,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2491 break; 2492 break;
2492 } 2493 }
2493 } else { 2494 } else {
2494 /* add the default mode */ 2495 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
2495 rdev->pm.power_state[state_index].type = 2496 if (rdev->pm.power_state) {
2496 POWER_STATE_TYPE_DEFAULT; 2497 /* add the default mode */
2497 rdev->pm.power_state[state_index].num_clock_modes = 1; 2498 rdev->pm.power_state[state_index].type =
2498 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; 2499 POWER_STATE_TYPE_DEFAULT;
2499 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; 2500 rdev->pm.power_state[state_index].num_clock_modes = 1;
2500 rdev->pm.power_state[state_index].default_clock_mode = 2501 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
2501 &rdev->pm.power_state[state_index].clock_info[0]; 2502 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
2502 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 2503 rdev->pm.power_state[state_index].default_clock_mode =
2503 rdev->pm.power_state[state_index].pcie_lanes = 16; 2504 &rdev->pm.power_state[state_index].clock_info[0];
2504 rdev->pm.default_power_state_index = state_index; 2505 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2505 rdev->pm.power_state[state_index].flags = 0; 2506 rdev->pm.power_state[state_index].pcie_lanes = 16;
2506 state_index++; 2507 rdev->pm.default_power_state_index = state_index;
2508 rdev->pm.power_state[state_index].flags = 0;
2509 state_index++;
2510 }
2507 } 2511 }
2508 2512
2509 rdev->pm.num_power_states = state_index; 2513 rdev->pm.num_power_states = state_index;
@@ -2529,7 +2533,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
2529 int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); 2533 int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
2530 2534
2531 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2535 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2532 return args.ulReturnEngineClock; 2536 return le32_to_cpu(args.ulReturnEngineClock);
2533} 2537}
2534 2538
2535uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) 2539uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
@@ -2538,7 +2542,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
2538 int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); 2542 int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
2539 2543
2540 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2544 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2541 return args.ulReturnMemoryClock; 2545 return le32_to_cpu(args.ulReturnMemoryClock);
2542} 2546}
2543 2547
2544void radeon_atom_set_engine_clock(struct radeon_device *rdev, 2548void radeon_atom_set_engine_clock(struct radeon_device *rdev,
@@ -2547,7 +2551,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev,
2547 SET_ENGINE_CLOCK_PS_ALLOCATION args; 2551 SET_ENGINE_CLOCK_PS_ALLOCATION args;
2548 int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); 2552 int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
2549 2553
2550 args.ulTargetEngineClock = eng_clock; /* 10 khz */ 2554 args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
2551 2555
2552 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2556 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2553} 2557}
@@ -2561,7 +2565,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
2561 if (rdev->flags & RADEON_IS_IGP) 2565 if (rdev->flags & RADEON_IS_IGP)
2562 return; 2566 return;
2563 2567
2564 args.ulTargetMemoryClock = mem_clock; /* 10 khz */ 2568 args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
2565 2569
2566 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2570 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2567} 2571}
@@ -2619,7 +2623,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
2619 bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; 2623 bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
2620 2624
2621 /* tell the bios not to handle mode switching */ 2625 /* tell the bios not to handle mode switching */
2622 bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE); 2626 bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
2623 2627
2624 if (rdev->family >= CHIP_R600) { 2628 if (rdev->family >= CHIP_R600) {
2625 WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); 2629 WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
@@ -2670,10 +2674,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
2670 else 2674 else
2671 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); 2675 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
2672 2676
2673 if (lock) 2677 if (lock) {
2674 bios_6_scratch |= ATOM_S6_CRITICAL_STATE; 2678 bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
2675 else 2679 bios_6_scratch &= ~ATOM_S6_ACC_MODE;
2680 } else {
2676 bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; 2681 bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
2682 bios_6_scratch |= ATOM_S6_ACC_MODE;
2683 }
2677 2684
2678 if (rdev->family >= CHIP_R600) 2685 if (rdev->family >= CHIP_R600)
2679 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); 2686 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
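Most of the radeon_atombios.c hunks above follow one pattern: ATOM BIOS tables store multi-byte fields little-endian, so raw field reads gain le16_to_cpu()/le32_to_cpu() and fields written into command-table arguments gain cpu_to_le16()/cpu_to_le32(). On little-endian x86 these are no-ops; on big-endian hosts (e.g. the PowerPC Macs that also gain a connector table later in this series) the raw loads returned byte-swapped values. A tiny stand-alone sketch of the decode side, using a made-up struct and value rather than the real ATOM definitions:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-in for an ATOM table field stored little-endian. */
	struct fake_atom_field {
		uint8_t bytes[2];	/* a usBootUpVDDCVoltage-style 16-bit LE value */
	};

	/* Equivalent of le16_to_cpu(): assemble the value byte by byte so the
	 * result is host-endian regardless of the machine this runs on. */
	static uint16_t fake_le16_to_cpu(const struct fake_atom_field *f)
	{
		return (uint16_t)f->bytes[0] | ((uint16_t)f->bytes[1] << 8);
	}

	int main(void)
	{
		struct fake_atom_field f = { { 0x2c, 0x04 } };	/* 0x042c = 1068 */

		printf("vddc = %u mV\n", fake_le16_to_cpu(&f));
		return 0;
	}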
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 591fcae8f224..cf7c8d5b4ec2 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1504,6 +1504,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1504 (rdev->pdev->subsystem_device == 0x4a48)) { 1504 (rdev->pdev->subsystem_device == 0x4a48)) {
1505 /* Mac X800 */ 1505 /* Mac X800 */
1506 rdev->mode_info.connector_table = CT_MAC_X800; 1506 rdev->mode_info.connector_table = CT_MAC_X800;
1507 } else if ((rdev->pdev->device == 0x4150) &&
1508 (rdev->pdev->subsystem_vendor == 0x1002) &&
1509 (rdev->pdev->subsystem_device == 0x4150)) {
1510 /* Mac G5 9600 */
1511 rdev->mode_info.connector_table = CT_MAC_G5_9600;
1507 } else 1512 } else
1508#endif /* CONFIG_PPC_PMAC */ 1513#endif /* CONFIG_PPC_PMAC */
1509#ifdef CONFIG_PPC64 1514#ifdef CONFIG_PPC64
@@ -2022,6 +2027,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2022 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, 2027 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
2023 &hpd); 2028 &hpd);
2024 break; 2029 break;
2030 case CT_MAC_G5_9600:
2031 DRM_INFO("Connector Table: %d (mac g5 9600)\n",
2032 rdev->mode_info.connector_table);
2033 /* DVI - tv dac, dvo */
2034 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
2035 hpd.hpd = RADEON_HPD_1; /* ??? */
2036 radeon_add_legacy_encoder(dev,
2037 radeon_get_encoder_enum(dev,
2038 ATOM_DEVICE_DFP2_SUPPORT,
2039 0),
2040 ATOM_DEVICE_DFP2_SUPPORT);
2041 radeon_add_legacy_encoder(dev,
2042 radeon_get_encoder_enum(dev,
2043 ATOM_DEVICE_CRT2_SUPPORT,
2044 2),
2045 ATOM_DEVICE_CRT2_SUPPORT);
2046 radeon_add_legacy_connector(dev, 0,
2047 ATOM_DEVICE_DFP2_SUPPORT |
2048 ATOM_DEVICE_CRT2_SUPPORT,
2049 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2050 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
2051 &hpd);
2052 /* ADC - primary dac, internal tmds */
2053 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
2054 hpd.hpd = RADEON_HPD_2; /* ??? */
2055 radeon_add_legacy_encoder(dev,
2056 radeon_get_encoder_enum(dev,
2057 ATOM_DEVICE_DFP1_SUPPORT,
2058 0),
2059 ATOM_DEVICE_DFP1_SUPPORT);
2060 radeon_add_legacy_encoder(dev,
2061 radeon_get_encoder_enum(dev,
2062 ATOM_DEVICE_CRT1_SUPPORT,
2063 1),
2064 ATOM_DEVICE_CRT1_SUPPORT);
2065 radeon_add_legacy_connector(dev, 1,
2066 ATOM_DEVICE_DFP1_SUPPORT |
2067 ATOM_DEVICE_CRT1_SUPPORT,
2068 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2069 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
2070 &hpd);
2071 break;
2025 default: 2072 default:
2026 DRM_INFO("Connector table: %d (invalid)\n", 2073 DRM_INFO("Connector table: %d (invalid)\n",
2027 rdev->mode_info.connector_table); 2074 rdev->mode_info.connector_table);
@@ -2442,6 +2489,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
2442 2489
2443 rdev->pm.default_power_state_index = -1; 2490 rdev->pm.default_power_state_index = -1;
2444 2491
2492 /* allocate 2 power states */
2493 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
2494 if (!rdev->pm.power_state) {
2495 rdev->pm.default_power_state_index = state_index;
2496 rdev->pm.num_power_states = 0;
2497
2498 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
2499 rdev->pm.current_clock_mode_index = 0;
2500 return;
2501 }
2502
2445 if (rdev->flags & RADEON_IS_MOBILITY) { 2503 if (rdev->flags & RADEON_IS_MOBILITY) {
2446 offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); 2504 offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
2447 if (offset) { 2505 if (offset) {
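The power-mode parsers above stop assuming a fixed-size array: rdev->pm.power_state is now kzalloc()'d to match the number of states the BIOS actually reports, with an early return when the allocation fails so later code never indexes a NULL pointer, and radeon_pm_fini() gains the matching kfree(). A minimal sketch of the guard, with names taken from the hunks and the surrounding parsing omitted; the equivalent kcalloc(num_modes, sizeof(...), GFP_KERNEL) spelling would add multiplication-overflow checking:

	/* Sketch: size the array from the BIOS-reported mode count and
	 * bail out cleanly on allocation failure. */
	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes,
				       GFP_KERNEL);
	if (!rdev->pm.power_state)
		return state_index;	/* caller sees zero parsed states */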
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0d478932b1a9..4954e2d6ffa2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -936,8 +936,11 @@ int radeon_resume_kms(struct drm_device *dev)
936int radeon_gpu_reset(struct radeon_device *rdev) 936int radeon_gpu_reset(struct radeon_device *rdev)
937{ 937{
938 int r; 938 int r;
939 int resched;
939 940
940 radeon_save_bios_scratch_regs(rdev); 941 radeon_save_bios_scratch_regs(rdev);
942 /* block TTM */
943 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
941 radeon_suspend(rdev); 944 radeon_suspend(rdev);
942 945
943 r = radeon_asic_reset(rdev); 946 r = radeon_asic_reset(rdev);
@@ -946,6 +949,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
946 radeon_resume(rdev); 949 radeon_resume(rdev);
947 radeon_restore_bios_scratch_regs(rdev); 950 radeon_restore_bios_scratch_regs(rdev);
948 drm_helper_resume_force_mode(rdev->ddev); 951 drm_helper_resume_force_mode(rdev->ddev);
952 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
949 return 0; 953 return 0;
950 } 954 }
951 /* bad news, how to tell it to userspace ? */ 955 /* bad news, how to tell it to userspace ? */
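The radeon_gpu_reset() change brackets the reset with TTM's delayed-workqueue lock so the periodic buffer-cleanup work cannot run against a half-reset GPU; the value returned by ttm_bo_lock_delayed_workqueue() records whether that work needs re-arming and is handed back to ttm_bo_unlock_delayed_workqueue() on the success path. A condensed sketch of the flow (scratch-register save/restore and the failure path trimmed):

	int resched;

	/* Block TTM's delayed work for the duration of the reset. */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	if (radeon_asic_reset(rdev) == 0) {
		radeon_resume(rdev);
		/* Re-arm the delayed work now that the GPU is back. */
		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
		return 0;
	}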
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d26dabf878d9..0e657095de7c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -780,6 +780,125 @@ static int radeon_ddc_dump(struct drm_connector *connector)
780 return ret; 780 return ret;
781} 781}
782 782
783/* avivo */
784static void avivo_get_fb_div(struct radeon_pll *pll,
785 u32 target_clock,
786 u32 post_div,
787 u32 ref_div,
788 u32 *fb_div,
789 u32 *frac_fb_div)
790{
791 u32 tmp = post_div * ref_div;
792
793 tmp *= target_clock;
794 *fb_div = tmp / pll->reference_freq;
795 *frac_fb_div = tmp % pll->reference_freq;
796
797 if (*fb_div > pll->max_feedback_div)
798 *fb_div = pll->max_feedback_div;
799 else if (*fb_div < pll->min_feedback_div)
800 *fb_div = pll->min_feedback_div;
801}
802
803static u32 avivo_get_post_div(struct radeon_pll *pll,
804 u32 target_clock)
805{
806 u32 vco, post_div, tmp;
807
808 if (pll->flags & RADEON_PLL_USE_POST_DIV)
809 return pll->post_div;
810
811 if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
812 if (pll->flags & RADEON_PLL_IS_LCD)
813 vco = pll->lcd_pll_out_min;
814 else
815 vco = pll->pll_out_min;
816 } else {
817 if (pll->flags & RADEON_PLL_IS_LCD)
818 vco = pll->lcd_pll_out_max;
819 else
820 vco = pll->pll_out_max;
821 }
822
823 post_div = vco / target_clock;
824 tmp = vco % target_clock;
825
826 if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
827 if (tmp)
828 post_div++;
829 } else {
830 if (!tmp)
831 post_div--;
832 }
833
834 if (post_div > pll->max_post_div)
835 post_div = pll->max_post_div;
836 else if (post_div < pll->min_post_div)
837 post_div = pll->min_post_div;
838
839 return post_div;
840}
841
842#define MAX_TOLERANCE 10
843
844void radeon_compute_pll_avivo(struct radeon_pll *pll,
845 u32 freq,
846 u32 *dot_clock_p,
847 u32 *fb_div_p,
848 u32 *frac_fb_div_p,
849 u32 *ref_div_p,
850 u32 *post_div_p)
851{
852 u32 target_clock = freq / 10;
853 u32 post_div = avivo_get_post_div(pll, target_clock);
854 u32 ref_div = pll->min_ref_div;
855 u32 fb_div = 0, frac_fb_div = 0, tmp;
856
857 if (pll->flags & RADEON_PLL_USE_REF_DIV)
858 ref_div = pll->reference_div;
859
860 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
861 avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
862 frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
863 if (frac_fb_div >= 5) {
864 frac_fb_div -= 5;
865 frac_fb_div = frac_fb_div / 10;
866 frac_fb_div++;
867 }
868 if (frac_fb_div >= 10) {
869 fb_div++;
870 frac_fb_div = 0;
871 }
872 } else {
873 while (ref_div <= pll->max_ref_div) {
874 avivo_get_fb_div(pll, target_clock, post_div, ref_div,
875 &fb_div, &frac_fb_div);
876 if (frac_fb_div >= (pll->reference_freq / 2))
877 fb_div++;
878 frac_fb_div = 0;
879 tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
880 tmp = (tmp * 10000) / target_clock;
881
882 if (tmp > (10000 + MAX_TOLERANCE))
883 ref_div++;
884 else if (tmp >= (10000 - MAX_TOLERANCE))
885 break;
886 else
887 ref_div++;
888 }
889 }
890
891 *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
892 (ref_div * post_div * 10);
893 *fb_div_p = fb_div;
894 *frac_fb_div_p = frac_fb_div;
895 *ref_div_p = ref_div;
896 *post_div_p = post_div;
897 DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
898 *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
899}
900
901/* pre-avivo */
783static inline uint32_t radeon_div(uint64_t n, uint32_t d) 902static inline uint32_t radeon_div(uint64_t n, uint32_t d)
784{ 903{
785 uint64_t mod; 904 uint64_t mod;
@@ -790,13 +909,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
790 return n; 909 return n;
791} 910}
792 911
793void radeon_compute_pll(struct radeon_pll *pll, 912void radeon_compute_pll_legacy(struct radeon_pll *pll,
794 uint64_t freq, 913 uint64_t freq,
795 uint32_t *dot_clock_p, 914 uint32_t *dot_clock_p,
796 uint32_t *fb_div_p, 915 uint32_t *fb_div_p,
797 uint32_t *frac_fb_div_p, 916 uint32_t *frac_fb_div_p,
798 uint32_t *ref_div_p, 917 uint32_t *ref_div_p,
799 uint32_t *post_div_p) 918 uint32_t *post_div_p)
800{ 919{
801 uint32_t min_ref_div = pll->min_ref_div; 920 uint32_t min_ref_div = pll->min_ref_div;
802 uint32_t max_ref_div = pll->max_ref_div; 921 uint32_t max_ref_div = pll->max_ref_div;
@@ -826,6 +945,9 @@ void radeon_compute_pll(struct radeon_pll *pll,
826 pll_out_max = pll->pll_out_max; 945 pll_out_max = pll->pll_out_max;
827 } 946 }
828 947
948 if (pll_out_min > 64800)
949 pll_out_min = 64800;
950
829 if (pll->flags & RADEON_PLL_USE_REF_DIV) 951 if (pll->flags & RADEON_PLL_USE_REF_DIV)
830 min_ref_div = max_ref_div = pll->reference_div; 952 min_ref_div = max_ref_div = pll->reference_div;
831 else { 953 else {
@@ -849,7 +971,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
849 max_fractional_feed_div = pll->max_frac_feedback_div; 971 max_fractional_feed_div = pll->max_frac_feedback_div;
850 } 972 }
851 973
852 for (post_div = max_post_div; post_div >= min_post_div; --post_div) { 974 for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
853 uint32_t ref_div; 975 uint32_t ref_div;
854 976
855 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) 977 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
@@ -965,6 +1087,10 @@ void radeon_compute_pll(struct radeon_pll *pll,
965 *frac_fb_div_p = best_frac_feedback_div; 1087 *frac_fb_div_p = best_frac_feedback_div;
966 *ref_div_p = best_ref_div; 1088 *ref_div_p = best_ref_div;
967 *post_div_p = best_post_div; 1089 *post_div_p = best_post_div;
1090 DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
1091 freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
1092 best_ref_div, best_post_div);
1093
968} 1094}
969 1095
970static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) 1096static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
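The new radeon_compute_pll_avivo() path derives the feedback divider directly from the chosen post and reference dividers instead of searching the whole divider space, so the arithmetic is worth seeing once with concrete numbers. The figures below are round illustrative values, not from a real board; non-zero remainders are further scaled to tenths by the driver, which this example sidesteps by picking numbers that divide evenly:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ref_freq = 2700;	/* reference clock, illustrative */
		unsigned int target = 16200;	/* desired PLL output, same units */
		unsigned int post_div = 4, ref_div = 2;

		/* Feedback divider: target scaled by both output dividers,
		 * divided by the reference clock (cf. avivo_get_fb_div()). */
		unsigned int tmp = post_div * ref_div * target;
		unsigned int fb_div = tmp / ref_freq;	/* 129600 / 2700 = 48 */
		unsigned int frac = tmp % ref_freq;	/* 0: divides evenly here */

		/* Reconstruct the output clock; with frac == 0 it is exact. */
		unsigned int dot = (ref_freq * fb_div) / (ref_div * post_div);

		printf("fb_div=%u frac=%u dot=%u\n", fb_div, frac, dot);
		return 0;
	}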
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 448eba89d1e6..5cba46b9779a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1524,6 +1524,7 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
1524#define R600_CP_RB_CNTL 0xc104 1524#define R600_CP_RB_CNTL 0xc104
1525# define R600_RB_BUFSZ(x) ((x) << 0) 1525# define R600_RB_BUFSZ(x) ((x) << 0)
1526# define R600_RB_BLKSZ(x) ((x) << 8) 1526# define R600_RB_BLKSZ(x) ((x) << 8)
1527# define R600_BUF_SWAP_32BIT (2 << 16)
1527# define R600_RB_NO_UPDATE (1 << 27) 1528# define R600_RB_NO_UPDATE (1 << 27)
1528# define R600_RB_RPTR_WR_ENA (1 << 31) 1529# define R600_RB_RPTR_WR_ENA (1 << 31)
1529#define R600_CP_RB_RPTR_WR 0xc108 1530#define R600_CP_RB_RPTR_WR 0xc108
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 5e90984d5ad2..b4274883227f 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -910,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
910 910
911 args.v1.ucAction = action; 911 args.v1.ucAction = action;
912 if (action == ATOM_TRANSMITTER_ACTION_INIT) { 912 if (action == ATOM_TRANSMITTER_ACTION_INIT) {
913 args.v1.usInitInfo = connector_object_id; 913 args.v1.usInitInfo = cpu_to_le16(connector_object_id);
914 } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { 914 } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
915 args.v1.asMode.ucLaneSel = lane_num; 915 args.v1.asMode.ucLaneSel = lane_num;
916 args.v1.asMode.ucLaneSet = lane_set; 916 args.v1.asMode.ucLaneSet = lane_set;
@@ -1063,7 +1063,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
1063 if (!ASIC_IS_DCE4(rdev)) 1063 if (!ASIC_IS_DCE4(rdev))
1064 return; 1064 return;
1065 1065
1066 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) || 1066 if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
1067 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) 1067 (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
1068 return; 1068 return;
1069 1069
@@ -1140,7 +1140,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1140 case 3: 1140 case 3:
1141 args.v3.sExtEncoder.ucAction = action; 1141 args.v3.sExtEncoder.ucAction = action;
1142 if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) 1142 if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
1143 args.v3.sExtEncoder.usConnectorId = connector_object_id; 1143 args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
1144 else 1144 else
1145 args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 1145 args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
1146 args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); 1146 args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
@@ -1570,11 +1570,21 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
1570 } 1570 }
1571 1571
1572 /* set scaler clears this on some chips */ 1572 /* set scaler clears this on some chips */
1573 /* XXX check DCE4 */ 1573 if (ASIC_IS_AVIVO(rdev) &&
1574 if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) { 1574 (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
1575 if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) 1575 if (ASIC_IS_DCE4(rdev)) {
1576 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 1576 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1577 AVIVO_D1MODE_INTERLEAVE_EN); 1577 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
1578 EVERGREEN_INTERLEAVE_EN);
1579 else
1580 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
1581 } else {
1582 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1583 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
1584 AVIVO_D1MODE_INTERLEAVE_EN);
1585 else
1586 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
1587 }
1578 } 1588 }
1579} 1589}
1580 1590
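One logic fix above is easy to miss: the early-return test in atombios_set_edp_panel_power() used '||', and since no single action can equal both POWER_ON and POWER_OFF, the condition was always true and the function returned before programming the panel. The corrected guard, as in the hunk, only rejects actions outside that pair:

	/* Reject anything that is neither POWER_ON nor POWER_OFF.  With the
	 * old '||' this test was true for every action, so the function
	 * always bailed out. */
	if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
	    (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
		return;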
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index ace2e6384d40..cf0638c3b7c7 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -778,9 +778,9 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
778 DRM_DEBUG_KMS("\n"); 778 DRM_DEBUG_KMS("\n");
779 779
780 if (!use_bios_divs) { 780 if (!use_bios_divs) {
781 radeon_compute_pll(pll, mode->clock, 781 radeon_compute_pll_legacy(pll, mode->clock,
782 &freq, &feedback_div, &frac_fb_div, 782 &freq, &feedback_div, &frac_fb_div,
783 &reference_div, &post_divider); 783 &reference_div, &post_divider);
784 784
785 for (post_div = &post_divs[0]; post_div->divider; ++post_div) { 785 for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
786 if (post_div->divider == post_divider) 786 if (post_div->divider == post_divider)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 12bdeab91c86..a670caaee29e 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -149,6 +149,7 @@ struct radeon_tmds_pll {
149#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 149#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
150#define RADEON_PLL_USE_POST_DIV (1 << 12) 150#define RADEON_PLL_USE_POST_DIV (1 << 12)
151#define RADEON_PLL_IS_LCD (1 << 13) 151#define RADEON_PLL_IS_LCD (1 << 13)
152#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
152 153
153struct radeon_pll { 154struct radeon_pll {
154 /* reference frequency */ 155 /* reference frequency */
@@ -208,6 +209,7 @@ enum radeon_connector_table {
208 CT_EMAC, 209 CT_EMAC,
209 CT_RN50_POWER, 210 CT_RN50_POWER,
210 CT_MAC_X800, 211 CT_MAC_X800,
212 CT_MAC_G5_9600,
211}; 213};
212 214
213enum radeon_dvo_chip { 215enum radeon_dvo_chip {
@@ -510,13 +512,21 @@ extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
510 struct radeon_atom_ss *ss, 512 struct radeon_atom_ss *ss,
511 int id, u32 clock); 513 int id, u32 clock);
512 514
513extern void radeon_compute_pll(struct radeon_pll *pll, 515extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
514 uint64_t freq, 516 uint64_t freq,
515 uint32_t *dot_clock_p, 517 uint32_t *dot_clock_p,
516 uint32_t *fb_div_p, 518 uint32_t *fb_div_p,
517 uint32_t *frac_fb_div_p, 519 uint32_t *frac_fb_div_p,
518 uint32_t *ref_div_p, 520 uint32_t *ref_div_p,
519 uint32_t *post_div_p); 521 uint32_t *post_div_p);
522
523extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
524 u32 freq,
525 u32 *dot_clock_p,
526 u32 *fb_div_p,
527 u32 *frac_fb_div_p,
528 u32 *ref_div_p,
529 u32 *post_div_p);
520 530
521extern void radeon_setup_encoder_clones(struct drm_device *dev); 531extern void radeon_setup_encoder_clones(struct drm_device *dev);
522 532
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 3b1b2bf9cdd5..2aed03bde4b2 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -430,7 +430,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
430{ 430{
431 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 431 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
432 struct radeon_device *rdev = ddev->dev_private; 432 struct radeon_device *rdev = ddev->dev_private;
433 u32 temp; 433 int temp;
434 434
435 switch (rdev->pm.int_thermal_type) { 435 switch (rdev->pm.int_thermal_type) {
436 case THERMAL_TYPE_RV6XX: 436 case THERMAL_TYPE_RV6XX:
@@ -646,6 +646,9 @@ void radeon_pm_fini(struct radeon_device *rdev)
646#endif 646#endif
647 } 647 }
648 648
649 if (rdev->pm.power_state)
650 kfree(rdev->pm.power_state);
651
649 radeon_hwmon_fini(rdev); 652 radeon_hwmon_fini(rdev);
650} 653}
651 654
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1272e4b6a1d4..e5b2cf10cbf4 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -787,9 +787,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
787 radeon_mem_types_list[i].show = &radeon_mm_dump_table; 787 radeon_mem_types_list[i].show = &radeon_mm_dump_table;
788 radeon_mem_types_list[i].driver_features = 0; 788 radeon_mem_types_list[i].driver_features = 0;
789 if (i == 0) 789 if (i == 0)
790 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv; 790 radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
791 else 791 else
792 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv; 792 radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
793 793
794 } 794 }
795 /* Add ttm page pool to debugfs */ 795 /* Add ttm page pool to debugfs */
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index b506ec1cab4b..e8a1786b6426 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -683,9 +683,7 @@ r300 0x4f60
6830x4DF4 US_ALU_CONST_G_31 6830x4DF4 US_ALU_CONST_G_31
6840x4DF8 US_ALU_CONST_B_31 6840x4DF8 US_ALU_CONST_B_31
6850x4DFC US_ALU_CONST_A_31 6850x4DFC US_ALU_CONST_A_31
6860x4E04 RB3D_BLENDCNTL_R3
6870x4E08 RB3D_ABLENDCNTL_R3 6860x4E08 RB3D_ABLENDCNTL_R3
6880x4E0C RB3D_COLOR_CHANNEL_MASK
6890x4E10 RB3D_CONSTANT_COLOR 6870x4E10 RB3D_CONSTANT_COLOR
6900x4E14 RB3D_COLOR_CLEAR_VALUE 6880x4E14 RB3D_COLOR_CLEAR_VALUE
6910x4E18 RB3D_ROPCNTL_R3 6890x4E18 RB3D_ROPCNTL_R3
@@ -706,13 +704,11 @@ r300 0x4f60
7060x4E74 RB3D_CMASK_WRINDEX 7040x4E74 RB3D_CMASK_WRINDEX
7070x4E78 RB3D_CMASK_DWORD 7050x4E78 RB3D_CMASK_DWORD
7080x4E7C RB3D_CMASK_RDINDEX 7060x4E7C RB3D_CMASK_RDINDEX
7090x4E80 RB3D_AARESOLVE_OFFSET
7100x4E84 RB3D_AARESOLVE_PITCH
7110x4E88 RB3D_AARESOLVE_CTL
7120x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 7070x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
7130x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 7080x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
7140x4F04 ZB_ZSTENCILCNTL 7090x4F04 ZB_ZSTENCILCNTL
7150x4F08 ZB_STENCILREFMASK 7100x4F08 ZB_STENCILREFMASK
7160x4F14 ZB_ZTOP 7110x4F14 ZB_ZTOP
7170x4F18 ZB_ZCACHE_CTLSTAT 7120x4F18 ZB_ZCACHE_CTLSTAT
7130x4F28 ZB_DEPTHCLEARVALUE
7180x4F58 ZB_ZPASS_DATA 7140x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index 8c1214c2390f..722074e21e2f 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -130,7 +130,6 @@ r420 0x4f60
1300x401C GB_SELECT 1300x401C GB_SELECT
1310x4020 GB_AA_CONFIG 1310x4020 GB_AA_CONFIG
1320x4024 GB_FIFO_SIZE 1320x4024 GB_FIFO_SIZE
1330x4028 GB_Z_PEQ_CONFIG
1340x4100 TX_INVALTAGS 1330x4100 TX_INVALTAGS
1350x4200 GA_POINT_S0 1340x4200 GA_POINT_S0
1360x4204 GA_POINT_T0 1350x4204 GA_POINT_T0
@@ -750,9 +749,7 @@ r420 0x4f60
7500x4DF4 US_ALU_CONST_G_31 7490x4DF4 US_ALU_CONST_G_31
7510x4DF8 US_ALU_CONST_B_31 7500x4DF8 US_ALU_CONST_B_31
7520x4DFC US_ALU_CONST_A_31 7510x4DFC US_ALU_CONST_A_31
7530x4E04 RB3D_BLENDCNTL_R3
7540x4E08 RB3D_ABLENDCNTL_R3 7520x4E08 RB3D_ABLENDCNTL_R3
7550x4E0C RB3D_COLOR_CHANNEL_MASK
7560x4E10 RB3D_CONSTANT_COLOR 7530x4E10 RB3D_CONSTANT_COLOR
7570x4E14 RB3D_COLOR_CLEAR_VALUE 7540x4E14 RB3D_COLOR_CLEAR_VALUE
7580x4E18 RB3D_ROPCNTL_R3 7550x4E18 RB3D_ROPCNTL_R3
@@ -773,13 +770,11 @@ r420 0x4f60
7730x4E74 RB3D_CMASK_WRINDEX 7700x4E74 RB3D_CMASK_WRINDEX
7740x4E78 RB3D_CMASK_DWORD 7710x4E78 RB3D_CMASK_DWORD
7750x4E7C RB3D_CMASK_RDINDEX 7720x4E7C RB3D_CMASK_RDINDEX
7760x4E80 RB3D_AARESOLVE_OFFSET
7770x4E84 RB3D_AARESOLVE_PITCH
7780x4E88 RB3D_AARESOLVE_CTL
7790x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 7730x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
7800x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 7740x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
7810x4F04 ZB_ZSTENCILCNTL 7750x4F04 ZB_ZSTENCILCNTL
7820x4F08 ZB_STENCILREFMASK 7760x4F08 ZB_STENCILREFMASK
7830x4F14 ZB_ZTOP 7770x4F14 ZB_ZTOP
7840x4F18 ZB_ZCACHE_CTLSTAT 7780x4F18 ZB_ZCACHE_CTLSTAT
7790x4F28 ZB_DEPTHCLEARVALUE
7850x4F58 ZB_ZPASS_DATA 7800x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 0828d80396f2..d9f62866bbc1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -749,9 +749,7 @@ rs600 0x6d40
7490x4DF4 US_ALU_CONST_G_31 7490x4DF4 US_ALU_CONST_G_31
7500x4DF8 US_ALU_CONST_B_31 7500x4DF8 US_ALU_CONST_B_31
7510x4DFC US_ALU_CONST_A_31 7510x4DFC US_ALU_CONST_A_31
7520x4E04 RB3D_BLENDCNTL_R3
7530x4E08 RB3D_ABLENDCNTL_R3 7520x4E08 RB3D_ABLENDCNTL_R3
7540x4E0C RB3D_COLOR_CHANNEL_MASK
7550x4E10 RB3D_CONSTANT_COLOR 7530x4E10 RB3D_CONSTANT_COLOR
7560x4E14 RB3D_COLOR_CLEAR_VALUE 7540x4E14 RB3D_COLOR_CLEAR_VALUE
7570x4E18 RB3D_ROPCNTL_R3 7550x4E18 RB3D_ROPCNTL_R3
@@ -772,13 +770,11 @@ rs600 0x6d40
7720x4E74 RB3D_CMASK_WRINDEX 7700x4E74 RB3D_CMASK_WRINDEX
7730x4E78 RB3D_CMASK_DWORD 7710x4E78 RB3D_CMASK_DWORD
7740x4E7C RB3D_CMASK_RDINDEX 7720x4E7C RB3D_CMASK_RDINDEX
7750x4E80 RB3D_AARESOLVE_OFFSET
7760x4E84 RB3D_AARESOLVE_PITCH
7770x4E88 RB3D_AARESOLVE_CTL
7780x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 7730x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
7790x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 7740x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
7800x4F04 ZB_ZSTENCILCNTL 7750x4F04 ZB_ZSTENCILCNTL
7810x4F08 ZB_STENCILREFMASK 7760x4F08 ZB_STENCILREFMASK
7820x4F14 ZB_ZTOP 7770x4F14 ZB_ZTOP
7830x4F18 ZB_ZCACHE_CTLSTAT 7780x4F18 ZB_ZCACHE_CTLSTAT
7790x4F28 ZB_DEPTHCLEARVALUE
7840x4F58 ZB_ZPASS_DATA 7800x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index ef422bbacfc1..911a8fbd32bb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -164,7 +164,6 @@ rv515 0x6d40
1640x401C GB_SELECT 1640x401C GB_SELECT
1650x4020 GB_AA_CONFIG 1650x4020 GB_AA_CONFIG
1660x4024 GB_FIFO_SIZE 1660x4024 GB_FIFO_SIZE
1670x4028 GB_Z_PEQ_CONFIG
1680x4100 TX_INVALTAGS 1670x4100 TX_INVALTAGS
1690x4114 SU_TEX_WRAP_PS3 1680x4114 SU_TEX_WRAP_PS3
1700x4118 PS3_ENABLE 1690x4118 PS3_ENABLE
@@ -461,9 +460,7 @@ rv515 0x6d40
4610x4DF4 US_ALU_CONST_G_31 4600x4DF4 US_ALU_CONST_G_31
4620x4DF8 US_ALU_CONST_B_31 4610x4DF8 US_ALU_CONST_B_31
4630x4DFC US_ALU_CONST_A_31 4620x4DFC US_ALU_CONST_A_31
4640x4E04 RB3D_BLENDCNTL_R3
4650x4E08 RB3D_ABLENDCNTL_R3 4630x4E08 RB3D_ABLENDCNTL_R3
4660x4E0C RB3D_COLOR_CHANNEL_MASK
4670x4E10 RB3D_CONSTANT_COLOR 4640x4E10 RB3D_CONSTANT_COLOR
4680x4E14 RB3D_COLOR_CLEAR_VALUE 4650x4E14 RB3D_COLOR_CLEAR_VALUE
4690x4E18 RB3D_ROPCNTL_R3 4660x4E18 RB3D_ROPCNTL_R3
@@ -484,9 +481,6 @@ rv515 0x6d40
4840x4E74 RB3D_CMASK_WRINDEX 4810x4E74 RB3D_CMASK_WRINDEX
4850x4E78 RB3D_CMASK_DWORD 4820x4E78 RB3D_CMASK_DWORD
4860x4E7C RB3D_CMASK_RDINDEX 4830x4E7C RB3D_CMASK_RDINDEX
4870x4E80 RB3D_AARESOLVE_OFFSET
4880x4E84 RB3D_AARESOLVE_PITCH
4890x4E88 RB3D_AARESOLVE_CTL
4900x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 4840x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
4910x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 4850x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
4920x4EF8 RB3D_CONSTANT_COLOR_AR 4860x4EF8 RB3D_CONSTANT_COLOR_AR
@@ -496,4 +490,5 @@ rv515 0x6d40
4960x4F14 ZB_ZTOP 4900x4F14 ZB_ZTOP
4970x4F18 ZB_ZCACHE_CTLSTAT 4910x4F18 ZB_ZCACHE_CTLSTAT
4980x4F58 ZB_ZPASS_DATA 4920x4F58 ZB_ZPASS_DATA
4930x4F28 ZB_DEPTHCLEARVALUE
4990x4FD4 ZB_STENCILREFMASK_BF 4940x4FD4 ZB_STENCILREFMASK_BF
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 0137d3e3728d..6638c8e4c81b 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -77,9 +77,9 @@ void rs690_pm_info(struct radeon_device *rdev)
77 switch (crev) { 77 switch (crev) {
78 case 1: 78 case 1:
79 tmp.full = dfixed_const(100); 79 tmp.full = dfixed_const(100);
80 rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); 80 rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
81 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); 81 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
82 if (info->info.usK8MemoryClock) 82 if (le16_to_cpu(info->info.usK8MemoryClock))
83 rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); 83 rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
84 else if (rdev->clock.default_mclk) { 84 else if (rdev->clock.default_mclk) {
85 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); 85 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
@@ -91,16 +91,16 @@ void rs690_pm_info(struct radeon_device *rdev)
91 break; 91 break;
92 case 2: 92 case 2:
93 tmp.full = dfixed_const(100); 93 tmp.full = dfixed_const(100);
94 rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); 94 rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
95 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); 95 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
96 if (info->info_v2.ulBootUpUMAClock) 96 if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
97 rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); 97 rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
98 else if (rdev->clock.default_mclk) 98 else if (rdev->clock.default_mclk)
99 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); 99 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
100 else 100 else
101 rdev->pm.igp_system_mclk.full = dfixed_const(66700); 101 rdev->pm.igp_system_mclk.full = dfixed_const(66700);
102 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); 102 rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
103 rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); 103 rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
104 rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); 104 rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
105 rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); 105 rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
106 break; 106 break;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 491dc9000655..d8ba67690656 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -78,18 +78,23 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
78} 78}
79 79
80/* get temperature in millidegrees */ 80/* get temperature in millidegrees */
81u32 rv770_get_temp(struct radeon_device *rdev) 81int rv770_get_temp(struct radeon_device *rdev)
82{ 82{
83 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> 83 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
84 ASIC_T_SHIFT; 84 ASIC_T_SHIFT;
85 u32 actual_temp = 0; 85 int actual_temp;
86 86
87 if ((temp >> 9) & 1) 87 if (temp & 0x400)
88 actual_temp = 0; 88 actual_temp = -256;
89 else 89 else if (temp & 0x200)
90 actual_temp = (temp >> 1) & 0xff; 90 actual_temp = 255;
91 91 else if (temp & 0x100) {
92 return actual_temp * 1000; 92 actual_temp = temp & 0x1ff;
93 actual_temp |= ~0x1ff;
94 } else
95 actual_temp = temp & 0xff;
96
97 return (actual_temp * 1000) / 2;
93} 98}
94 99
95void rv770_pm_misc(struct radeon_device *rdev) 100void rv770_pm_misc(struct radeon_device *rdev)
@@ -316,7 +321,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
316 return -EINVAL; 321 return -EINVAL;
317 322
318 r700_cp_stop(rdev); 323 r700_cp_stop(rdev);
319 WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); 324 WREG32(CP_RB_CNTL,
325#ifdef __BIG_ENDIAN
326 BUF_SWAP_32BIT |
327#endif
328 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
320 329
321 /* Reset cp */ 330 /* Reset cp */
322 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 331 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
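rv770_get_temp() now returns a signed int and decodes the raw ASIC_T field as a signed quantity: bit 10 clamps the reading to -256, bit 9 clamps it to +255, bit 8 triggers sign extension of the 9-bit value, and the result is halved when converting to millidegrees (the field's LSB evidently being half a degree). A stand-alone sketch of that decode; the register sample is invented, not a captured value:

	#include <stdio.h>

	/* Decode an RV770-style temperature field into millidegrees Celsius,
	 * mirroring the logic added in rv770_get_temp(). */
	static int decode_temp(unsigned int temp)
	{
		int actual;

		if (temp & 0x400)
			actual = -256;
		else if (temp & 0x200)
			actual = 255;
		else if (temp & 0x100) {
			actual = temp & 0x1ff;
			actual |= ~0x1ff;	/* sign-extend the 9-bit value */
		} else
			actual = temp & 0xff;

		return (actual * 1000) / 2;	/* halved, per the patch */
	}

	int main(void)
	{
		printf("%d mC\n", decode_temp(0x1f3));	/* prints -6500 */
		return 0;
	}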
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index abc8cf5a3672..79fa588e9ed5 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -76,10 +76,10 @@
76#define ROQ_IB1_START(x) ((x) << 0) 76#define ROQ_IB1_START(x) ((x) << 0)
77#define ROQ_IB2_START(x) ((x) << 8) 77#define ROQ_IB2_START(x) ((x) << 8)
78#define CP_RB_CNTL 0xC104 78#define CP_RB_CNTL 0xC104
79#define RB_BUFSZ(x) ((x)<<0) 79#define RB_BUFSZ(x) ((x) << 0)
80#define RB_BLKSZ(x) ((x)<<8) 80#define RB_BLKSZ(x) ((x) << 8)
81#define RB_NO_UPDATE (1<<27) 81#define RB_NO_UPDATE (1 << 27)
82#define RB_RPTR_WR_ENA (1<<31) 82#define RB_RPTR_WR_ENA (1 << 31)
83#define BUF_SWAP_32BIT (2 << 16) 83#define BUF_SWAP_32BIT (2 << 16)
84#define CP_RB_RPTR 0x8700 84#define CP_RB_RPTR 0x8700
85#define CP_RB_RPTR_ADDR 0xC10C 85#define CP_RB_RPTR_ADDR 0xC10C
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 09aea5f1556d..70e60a4bb678 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -1,11 +1,13 @@
1config STUB_POULSBO 1config STUB_POULSBO
2 tristate "Intel GMA500 Stub Driver" 2 tristate "Intel GMA500 Stub Driver"
3 depends on PCI 3 depends on PCI
4 depends on NET # for THERMAL
4 # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled 5 # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
5 # but for select to work, need to select ACPI_VIDEO's dependencies, ick 6 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
6 select BACKLIGHT_CLASS_DEVICE if ACPI 7 select BACKLIGHT_CLASS_DEVICE if ACPI
7 select INPUT if ACPI 8 select INPUT if ACPI
8 select ACPI_VIDEO if ACPI 9 select ACPI_VIDEO if ACPI
10 select THERMAL if ACPI
9 help 11 help
10 Choose this option if you have a system that has Intel GMA500 12 Choose this option if you have a system that has Intel GMA500
11 (Poulsbo) integrated graphics. If M is selected, the module will 13 (Poulsbo) integrated graphics. If M is selected, the module will
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 773e484f1646..297bc9a7d6e6 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -238,13 +238,13 @@ config SENSORS_K8TEMP
238 will be called k8temp. 238 will be called k8temp.
239 239
240config SENSORS_K10TEMP 240config SENSORS_K10TEMP
241 tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor" 241 tristate "AMD Family 10h/11h/12h/14h temperature sensor"
242 depends on X86 && PCI 242 depends on X86 && PCI
243 help 243 help
244 If you say yes here you get support for the temperature 244 If you say yes here you get support for the temperature
245 sensor(s) inside your CPU. Supported are later revisions of 245 sensor(s) inside your CPU. Supported are later revisions of
246 the AMD Family 10h and all revisions of the AMD Family 11h 246 the AMD Family 10h and all revisions of the AMD Family 11h,
247 microarchitectures. 247 12h (Llano), and 14h (Brazos) microarchitectures.
248 248
249 This driver can also be built as a module. If so, the module 249 This driver can also be built as a module. If so, the module
250 will be called k10temp. 250 will be called k10temp.
@@ -455,13 +455,14 @@ config SENSORS_JZ4740
455 called jz4740-hwmon. 455 called jz4740-hwmon.
456 456
457config SENSORS_JC42 457config SENSORS_JC42
458 tristate "JEDEC JC42.4 compliant temperature sensors" 458 tristate "JEDEC JC42.4 compliant memory module temperature sensors"
459 depends on I2C 459 depends on I2C
460 help 460 help
461 If you say yes here you get support for Jedec JC42.4 compliant 461 If you say yes here, you get support for JEDEC JC42.4 compliant
462 temperature sensors. Support will include, but not be limited to, 462 temperature sensors, which are used on many DDR3 memory modules for
463 ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, 463 mobile devices and servers. Support will include, but not be limited
464 MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3. 464 to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
465 MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
465 466
466 This driver can also be built as a module. If so, the module 467 This driver can also be built as a module. If so, the module
467 will be called jc42. 468 will be called jc42.
@@ -574,7 +575,7 @@ config SENSORS_LM85
574 help 575 help
575 If you say yes here you get support for National Semiconductor LM85 576 If you say yes here you get support for National Semiconductor LM85
576 sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100, 577 sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100,
577 EMC6D101 and EMC6D102. 578 EMC6D101, EMC6D102, and EMC6D103.
578 579
579 This driver can also be built as a module. If so, the module 580 This driver can also be built as a module. If so, the module
580 will be called lm85. 581 will be called lm85.
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 5dea9faa1656..cd2a6e437aec 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -344,7 +344,7 @@ static int emc1403_remove(struct i2c_client *client)
344} 344}
345 345
346static const unsigned short emc1403_address_list[] = { 346static const unsigned short emc1403_address_list[] = {
347 0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END 347 0x18, 0x29, 0x4c, 0x4d, I2C_CLIENT_END
348}; 348};
349 349
350static const struct i2c_device_id emc1403_idtable[] = { 350static const struct i2c_device_id emc1403_idtable[] = {
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 340fc78c8dde..934991237061 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = {
53 53
54/* Configuration register defines */ 54/* Configuration register defines */
55#define JC42_CFG_CRIT_ONLY (1 << 2) 55#define JC42_CFG_CRIT_ONLY (1 << 2)
56#define JC42_CFG_TCRIT_LOCK (1 << 6)
57#define JC42_CFG_EVENT_LOCK (1 << 7)
56#define JC42_CFG_SHUTDOWN (1 << 8) 58#define JC42_CFG_SHUTDOWN (1 << 8)
57#define JC42_CFG_HYST_SHIFT 9 59#define JC42_CFG_HYST_SHIFT 9
58#define JC42_CFG_HYST_MASK 0x03 60#define JC42_CFG_HYST_MASK 0x03
@@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
332{ 334{
333 struct i2c_client *client = to_i2c_client(dev); 335 struct i2c_client *client = to_i2c_client(dev);
334 struct jc42_data *data = i2c_get_clientdata(client); 336 struct jc42_data *data = i2c_get_clientdata(client);
335 long val; 337 unsigned long val;
336 int diff, hyst; 338 int diff, hyst;
337 int err; 339 int err;
338 int ret = count; 340 int ret = count;
@@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev,
380 382
381static DEVICE_ATTR(temp1_input, S_IRUGO, 383static DEVICE_ATTR(temp1_input, S_IRUGO,
382 show_temp_input, NULL); 384 show_temp_input, NULL);
383static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, 385static DEVICE_ATTR(temp1_crit, S_IRUGO,
384 show_temp_crit, set_temp_crit); 386 show_temp_crit, set_temp_crit);
385static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, 387static DEVICE_ATTR(temp1_min, S_IRUGO,
386 show_temp_min, set_temp_min); 388 show_temp_min, set_temp_min);
387static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, 389static DEVICE_ATTR(temp1_max, S_IRUGO,
388 show_temp_max, set_temp_max); 390 show_temp_max, set_temp_max);
389 391
390static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, 392static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO,
391 show_temp_crit_hyst, set_temp_crit_hyst); 393 show_temp_crit_hyst, set_temp_crit_hyst);
392static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, 394static DEVICE_ATTR(temp1_max_hyst, S_IRUGO,
393 show_temp_max_hyst, NULL); 395 show_temp_max_hyst, NULL);
@@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = {
412 NULL 414 NULL
413}; 415};
414 416
417static mode_t jc42_attribute_mode(struct kobject *kobj,
418 struct attribute *attr, int index)
419{
420 struct device *dev = container_of(kobj, struct device, kobj);
421 struct i2c_client *client = to_i2c_client(dev);
422 struct jc42_data *data = i2c_get_clientdata(client);
423 unsigned int config = data->config;
424 bool readonly;
425
426 if (attr == &dev_attr_temp1_crit.attr)
427 readonly = config & JC42_CFG_TCRIT_LOCK;
428 else if (attr == &dev_attr_temp1_min.attr ||
429 attr == &dev_attr_temp1_max.attr)
430 readonly = config & JC42_CFG_EVENT_LOCK;
431 else if (attr == &dev_attr_temp1_crit_hyst.attr)
432 readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK);
433 else
434 readonly = true;
435
436 return S_IRUGO | (readonly ? 0 : S_IWUSR);
437}
438
415static const struct attribute_group jc42_group = { 439static const struct attribute_group jc42_group = {
416 .attrs = jc42_attributes, 440 .attrs = jc42_attributes,
441 .is_visible = jc42_attribute_mode,
417}; 442};
418 443
419/* Return 0 if detection is successful, -ENODEV otherwise */ 444/* Return 0 if detection is successful, -ENODEV otherwise */
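The jc42 hunks tie the writable limit attributes to two new lock bits: when the chip's configuration register has TCRIT_LOCK or EVENT_LOCK set, the corresponding limit registers are read-only in hardware, so the driver now exposes those sysfs files read-only via an is_visible callback on the attribute group. A stand-alone sketch of the permission decision; constants are renamed locally and the sample config value is invented:

	#include <stdio.h>

	#define CFG_TCRIT_LOCK	(1 << 6)	/* cf. JC42_CFG_TCRIT_LOCK */
	#define CFG_EVENT_LOCK	(1 << 7)	/* cf. JC42_CFG_EVENT_LOCK */
	#define MODE_RUGO	0444
	#define MODE_WUSR	0200

	/* Mirror of the permission logic in the new jc42_attribute_mode():
	 * a limit attribute loses its write bit when the matching hardware
	 * lock bit is set. */
	static unsigned int limit_mode(unsigned int config, int is_crit)
	{
		unsigned int lock = is_crit ? CFG_TCRIT_LOCK : CFG_EVENT_LOCK;

		return MODE_RUGO | ((config & lock) ? 0 : MODE_WUSR);
	}

	int main(void)
	{
		unsigned int config = CFG_TCRIT_LOCK;	/* tcrit locked, events not */

		printf("temp1_crit mode: 0%o\n", limit_mode(config, 1));	/* 0444 */
		printf("temp1_max  mode: 0%o\n", limit_mode(config, 0));	/* 0644 */
		return 0;
	}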
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index da5a2404cd3e..82bf65aa2968 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * k10temp.c - AMD Family 10h/11h processor hardware monitoring 2 * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring
3 * 3 *
4 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> 4 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
5 * 5 *
@@ -25,7 +25,7 @@
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <asm/processor.h> 26#include <asm/processor.h>
27 27
28MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor"); 28MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor");
29MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); 29MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
30MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
31 31
@@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev)
208static const struct pci_device_id k10temp_id_table[] = { 208static const struct pci_device_id k10temp_id_table[] = {
209 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, 209 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
210 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, 210 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
211 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
211 {} 212 {}
212}; 213};
213MODULE_DEVICE_TABLE(pci, k10temp_id_table); 214MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 776aeb3019d2..508cb291f71b 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -98,6 +98,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
98 * value, it uses signed 8-bit values with LSB = 1 degree Celsius. 98 * value, it uses signed 8-bit values with LSB = 1 degree Celsius.
99 * For remote temperature, low and high limits, it uses signed 11-bit values 99 * For remote temperature, low and high limits, it uses signed 11-bit values
100 * with LSB = 0.125 degree Celsius, left-justified in 16-bit registers. 100 * with LSB = 0.125 degree Celsius, left-justified in 16-bit registers.
101 * For LM64 the actual remote diode temperature is 16 degree Celsius higher
102 * than the register reading. Remote temperature setpoints have to be
103 * adapted accordingly.
101 */ 104 */
102 105
103#define FAN_FROM_REG(reg) ((reg) == 0xFFFC || (reg) == 0 ? 0 : \ 106#define FAN_FROM_REG(reg) ((reg) == 0xFFFC || (reg) == 0 ? 0 : \
@@ -165,6 +168,8 @@ struct lm63_data {
165 struct mutex update_lock; 168 struct mutex update_lock;
166 char valid; /* zero until following fields are valid */ 169 char valid; /* zero until following fields are valid */
167 unsigned long last_updated; /* in jiffies */ 170 unsigned long last_updated; /* in jiffies */
171 int kind;
172 int temp2_offset;
168 173
169 /* registers values */ 174 /* registers values */
170 u8 config, config_fan; 175 u8 config, config_fan;
@@ -247,16 +252,34 @@ static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dum
247 return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2); 252 return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
248} 253}
249 254
250static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr, 255/*
251 char *buf) 256 * There are 8bit registers for both local(temp1) and remote(temp2) sensor.
257 * For remote sensor registers temp2_offset has to be considered,
258 * for local sensor it must not.
259 * So we need separate 8bit accessors for local and remote sensor.
260 */
261static ssize_t show_local_temp8(struct device *dev,
262 struct device_attribute *devattr,
263 char *buf)
252{ 264{
253 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 265 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
254 struct lm63_data *data = lm63_update_device(dev); 266 struct lm63_data *data = lm63_update_device(dev);
255 return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])); 267 return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]));
256} 268}
257 269
258static ssize_t set_temp8(struct device *dev, struct device_attribute *dummy, 270static ssize_t show_remote_temp8(struct device *dev,
259 const char *buf, size_t count) 271 struct device_attribute *devattr,
272 char *buf)
273{
274 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
275 struct lm63_data *data = lm63_update_device(dev);
276 return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])
277 + data->temp2_offset);
278}
279
280static ssize_t set_local_temp8(struct device *dev,
281 struct device_attribute *dummy,
282 const char *buf, size_t count)
260{ 283{
261 struct i2c_client *client = to_i2c_client(dev); 284 struct i2c_client *client = to_i2c_client(dev);
262 struct lm63_data *data = i2c_get_clientdata(client); 285 struct lm63_data *data = i2c_get_clientdata(client);
@@ -274,7 +297,8 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
274{ 297{
275 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 298 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
276 struct lm63_data *data = lm63_update_device(dev); 299 struct lm63_data *data = lm63_update_device(dev);
277 return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])); 300 return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])
301 + data->temp2_offset);
278} 302}
279 303
280static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr, 304static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
@@ -294,7 +318,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
294 int nr = attr->index; 318 int nr = attr->index;
295 319
296 mutex_lock(&data->update_lock); 320 mutex_lock(&data->update_lock);
297 data->temp11[nr] = TEMP11_TO_REG(val); 321 data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
298 i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2], 322 i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
299 data->temp11[nr] >> 8); 323 data->temp11[nr] >> 8);
300 i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1], 324 i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
@@ -310,6 +334,7 @@ static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute
310{ 334{
311 struct lm63_data *data = lm63_update_device(dev); 335 struct lm63_data *data = lm63_update_device(dev);
312 return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2]) 336 return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2])
337 + data->temp2_offset
313 - TEMP8_FROM_REG(data->temp2_crit_hyst)); 338 - TEMP8_FROM_REG(data->temp2_crit_hyst));
314} 339}
315 340
@@ -324,7 +349,7 @@ static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute *
324 long hyst; 349 long hyst;
325 350
326 mutex_lock(&data->update_lock); 351 mutex_lock(&data->update_lock);
327 hyst = TEMP8_FROM_REG(data->temp8[2]) - val; 352 hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val;
328 i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST, 353 i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
329 HYST_TO_REG(hyst)); 354 HYST_TO_REG(hyst));
330 mutex_unlock(&data->update_lock); 355 mutex_unlock(&data->update_lock);
@@ -355,16 +380,21 @@ static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
355static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1); 380static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1);
356static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL); 381static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL);
357 382
358static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp8, NULL, 0); 383static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
359static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8, 384static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
360 set_temp8, 1); 385 set_local_temp8, 1);
361 386
362static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0); 387static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
363static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11, 388static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
364 set_temp11, 1); 389 set_temp11, 1);
365static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11, 390static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
366 set_temp11, 2); 391 set_temp11, 2);
367static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp8, NULL, 2); 392/*
393 * On LM63, temp2_crit can be set only once, which should be the job
394 * of the bootloader.
395 */
396static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
397 NULL, 2);
368static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst, 398static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
369 set_temp2_crit_hyst); 399 set_temp2_crit_hyst);
370 400
@@ -479,7 +509,12 @@ static int lm63_probe(struct i2c_client *new_client,
479 data->valid = 0; 509 data->valid = 0;
480 mutex_init(&data->update_lock); 510 mutex_init(&data->update_lock);
481 511
482 /* Initialize the LM63 chip */ 512 /* Set the device type */
513 data->kind = id->driver_data;
514 if (data->kind == lm64)
515 data->temp2_offset = 16000;
516
517 /* Initialize chip */
483 lm63_init_client(new_client); 518 lm63_init_client(new_client);
484 519
485 /* Register sysfs hooks */ 520 /* Register sysfs hooks */
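
The lm63 hunks above add a per-chip temp2_offset of 16000 millidegrees for the LM64, add it to every remote-sensor reading, and subtract it again before converting user setpoints back to register values. The standalone C sketch below is not the kernel driver; the register values are made up and the 8-bit conversion is assumed to follow the driver's TEMP8 macros:

/*
 * Standalone sketch: a fixed 16 degC (16000 millidegree) offset for the
 * LM64 remote diode is added on reads and subtracted before writes.
 */
#include <stdio.h>
#include <stdint.h>

#define LM64_REMOTE_OFFSET 16000                /* millidegrees Celsius */

static int temp8_from_reg(int8_t reg)           /* register -> millidegrees */
{
        return (int)reg * 1000;
}

static int8_t temp8_to_reg(long val)            /* millidegrees -> register */
{
        if (val <= -128000)
                return -128;
        if (val >= 127000)
                return 127;
        return (int8_t)((val < 0 ? val - 500 : val + 500) / 1000);
}

int main(void)
{
        int8_t raw = 70;                        /* hypothetical register read */
        long reported = temp8_from_reg(raw) + LM64_REMOTE_OFFSET;
        printf("register %d -> reported %ld millidegrees\n", raw, reported);

        long setpoint = 86000;                  /* user writes 86 degC */
        int8_t stored = temp8_to_reg(setpoint - LM64_REMOTE_OFFSET);
        printf("setpoint %ld millidegrees -> register %d\n", setpoint, stored);
        return 0;
}
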
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 1e229847f37a..d2cc28660816 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
41enum chips { 41enum chips {
42 any_chip, lm85b, lm85c, 42 any_chip, lm85b, lm85c,
43 adm1027, adt7463, adt7468, 43 adm1027, adt7463, adt7468,
44 emc6d100, emc6d102 44 emc6d100, emc6d102, emc6d103
45}; 45};
46 46
47/* The LM85 registers */ 47/* The LM85 registers */
@@ -90,6 +90,9 @@ enum chips {
90#define LM85_VERSTEP_EMC6D100_A0 0x60 90#define LM85_VERSTEP_EMC6D100_A0 0x60
91#define LM85_VERSTEP_EMC6D100_A1 0x61 91#define LM85_VERSTEP_EMC6D100_A1 0x61
92#define LM85_VERSTEP_EMC6D102 0x65 92#define LM85_VERSTEP_EMC6D102 0x65
93#define LM85_VERSTEP_EMC6D103_A0 0x68
94#define LM85_VERSTEP_EMC6D103_A1 0x69
95#define LM85_VERSTEP_EMC6D103S 0x6A /* Also known as EMC6D103:A2 */
93 96
94#define LM85_REG_CONFIG 0x40 97#define LM85_REG_CONFIG 0x40
95 98
@@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = {
348 { "emc6d100", emc6d100 }, 351 { "emc6d100", emc6d100 },
349 { "emc6d101", emc6d100 }, 352 { "emc6d101", emc6d100 },
350 { "emc6d102", emc6d102 }, 353 { "emc6d102", emc6d102 },
354 { "emc6d103", emc6d103 },
351 { } 355 { }
352}; 356};
353MODULE_DEVICE_TABLE(i2c, lm85_id); 357MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
1250 case LM85_VERSTEP_EMC6D102: 1254 case LM85_VERSTEP_EMC6D102:
1251 type_name = "emc6d102"; 1255 type_name = "emc6d102";
1252 break; 1256 break;
1257 case LM85_VERSTEP_EMC6D103_A0:
1258 case LM85_VERSTEP_EMC6D103_A1:
1259 type_name = "emc6d103";
1260 break;
1261 /*
1262 * Registers apparently missing in EMC6D103S/EMC6D103:A2
1263 * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
1264 * (according to the data sheets), but used unconditionally
1265 * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
1266 * So skip EMC6D103S for now.
1267 case LM85_VERSTEP_EMC6D103S:
1268 type_name = "emc6d103s";
1269 break;
1270 */
1253 } 1271 }
1254 } else { 1272 } else {
1255 dev_dbg(&adapter->dev, 1273 dev_dbg(&adapter->dev,
@@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client,
1283 case adt7468: 1301 case adt7468:
1284 case emc6d100: 1302 case emc6d100:
1285 case emc6d102: 1303 case emc6d102:
1304 case emc6d103:
1286 data->freq_map = adm1027_freq_map; 1305 data->freq_map = adm1027_freq_map;
1287 break; 1306 break;
1288 default: 1307 default:
@@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
1468 /* More alarm bits */ 1487 /* More alarm bits */
1469 data->alarms |= lm85_read_value(client, 1488 data->alarms |= lm85_read_value(client,
1470 EMC6D100_REG_ALARM3) << 16; 1489 EMC6D100_REG_ALARM3) << 16;
1471 } else if (data->type == emc6d102) { 1490 } else if (data->type == emc6d102 || data->type == emc6d103) {
1472 /* Have to read LSB bits after the MSB ones because 1491 /* Have to read LSB bits after the MSB ones because
1473 the reading of the MSB bits has frozen the 1492 the reading of the MSB bits has frozen the
1474 LSBs (backward from the ADM1027). 1493 LSBs (backward from the ADM1027).
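
The lm85 hunks above identify the EMC6D103 by its version/step register and deliberately leave the EMC6D103S (verstep 0x6A) unsupported, because the driver unconditionally uses registers that part lacks. A minimal sketch of that detection logic; the verstep values are copied from the patch, everything else is invented for illustration:

#include <stdio.h>

static const char *emc_type_name(unsigned int verstep)
{
        switch (verstep) {
        case 0x65:              /* EMC6D102 */
                return "emc6d102";
        case 0x68:              /* EMC6D103:A0 */
        case 0x69:              /* EMC6D103:A1 */
                return "emc6d103";
        default:                /* includes 0x6A, the EMC6D103S, skipped on purpose */
                return NULL;
        }
}

int main(void)
{
        const unsigned int steps[] = { 0x65, 0x68, 0x69, 0x6a };

        for (unsigned int i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
                const char *name = emc_type_name(steps[i]);
                printf("verstep 0x%02x -> %s\n", steps[i],
                       name ? name : "unsupported");
        }
        return 0;
}
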
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index e38be1bcc01c..fbbfa24cf572 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1079,7 +1079,7 @@ static void ib_sa_remove_one(struct ib_device *device)
1079 1079
1080 ib_unregister_event_handler(&sa_dev->event_handler); 1080 ib_unregister_event_handler(&sa_dev->event_handler);
1081 1081
1082 flush_scheduled_work(); 1082 flush_workqueue(ib_wq);
1083 1083
1084 for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) { 1084 for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
1085 if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) { 1085 if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ca12acf38379..ec1e9da1488b 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -636,6 +636,16 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
636 } 636 }
637} 637}
638 638
639static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
640 struct rdma_route *route)
641{
642 struct rdma_dev_addr *dev_addr;
643
644 dev_addr = &route->addr.dev_addr;
645 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
646 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
647}
648
639static ssize_t ucma_query_route(struct ucma_file *file, 649static ssize_t ucma_query_route(struct ucma_file *file,
640 const char __user *inbuf, 650 const char __user *inbuf,
641 int in_len, int out_len) 651 int in_len, int out_len)
@@ -670,8 +680,10 @@ static ssize_t ucma_query_route(struct ucma_file *file,
670 680
671 resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; 681 resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
672 resp.port_num = ctx->cm_id->port_num; 682 resp.port_num = ctx->cm_id->port_num;
673 if (rdma_node_get_transport(ctx->cm_id->device->node_type) == RDMA_TRANSPORT_IB) { 683 switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
674 switch (rdma_port_get_link_layer(ctx->cm_id->device, ctx->cm_id->port_num)) { 684 case RDMA_TRANSPORT_IB:
685 switch (rdma_port_get_link_layer(ctx->cm_id->device,
686 ctx->cm_id->port_num)) {
675 case IB_LINK_LAYER_INFINIBAND: 687 case IB_LINK_LAYER_INFINIBAND:
676 ucma_copy_ib_route(&resp, &ctx->cm_id->route); 688 ucma_copy_ib_route(&resp, &ctx->cm_id->route);
677 break; 689 break;
@@ -681,6 +693,12 @@ static ssize_t ucma_query_route(struct ucma_file *file,
681 default: 693 default:
682 break; 694 break;
683 } 695 }
696 break;
697 case RDMA_TRANSPORT_IWARP:
698 ucma_copy_iw_route(&resp, &ctx->cm_id->route);
699 break;
700 default:
701 break;
684 } 702 }
685 703
686out: 704out:
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
index 9ce7819b7b2e..2ec716fb2edb 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -107,7 +107,7 @@ struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
107 r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL); 107 r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
108 if (r) { 108 if (r) {
109 init_waitqueue_head(&r->wait_object); 109 init_waitqueue_head(&r->wait_object);
110 r->reply_msg = (u64) NULL; 110 r->reply_msg = 0;
111 r->event = 0; 111 r->event = 0;
112 r->cm_id = NULL; 112 r->cm_id = NULL;
113 r->qp = NULL; 113 r->qp = NULL;
@@ -123,7 +123,7 @@ struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
123 */ 123 */
124void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r) 124void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
125{ 125{
126 r->reply_msg = (u64) NULL; 126 r->reply_msg = 0;
127 if (atomic_dec_and_test(&r->refcnt)) { 127 if (atomic_dec_and_test(&r->refcnt)) {
128 kfree(r); 128 kfree(r);
129 } 129 }
@@ -151,7 +151,7 @@ void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
151void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r) 151void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
152{ 152{
153 if (atomic_dec_and_test(&r->refcnt)) { 153 if (atomic_dec_and_test(&r->refcnt)) {
154 if (r->reply_msg != (u64) NULL) 154 if (r->reply_msg != 0)
155 vq_repbuf_free(c2dev, 155 vq_repbuf_free(c2dev,
156 (void *) (unsigned long) r->reply_msg); 156 (void *) (unsigned long) r->reply_msg);
157 kfree(r); 157 kfree(r);
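
The amso1100 change above stores "no reply" as plain 0 instead of casting NULL to u64, since reply_msg is an integer field that merely carries a pointer value. A small userspace sketch of the same pattern, assuming nothing about the real driver beyond that field's role:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Toy stand-in for struct c2_vq_req: reply_msg is an integer that holds
 * a pointer value, so its "empty" state is 0, not (u64) NULL. */
struct req {
        uint64_t reply_msg;
};

int main(void)
{
        struct req r = { .reply_msg = 0 };              /* not (u64) NULL */
        char *reply = malloc(32);

        if (!reply)
                return 1;
        r.reply_msg = (uint64_t)(unsigned long)reply;   /* store the pointer value */

        if (r.reply_msg != 0)                           /* integer comparison */
                free((void *)(unsigned long)r.reply_msg);
        r.reply_msg = 0;

        printf("reply_msg cleared: %llu\n", (unsigned long long)r.reply_msg);
        return 0;
}
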
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0dc62b1438be..8b00e6c46f01 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -380,7 +380,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
380 16)) | FW_WR_FLOWID(ep->hwtid)); 380 16)) | FW_WR_FLOWID(ep->hwtid));
381 381
382 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 382 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
383 flowc->mnemval[0].val = cpu_to_be32(0); 383 flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
384 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 384 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
385 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); 385 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
386 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 386 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 20800900ef3f..4f0be25cab1a 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -220,7 +220,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
220 V_FW_RI_RES_WR_DCAEN(0) | 220 V_FW_RI_RES_WR_DCAEN(0) |
221 V_FW_RI_RES_WR_DCACPU(0) | 221 V_FW_RI_RES_WR_DCACPU(0) |
222 V_FW_RI_RES_WR_FBMIN(2) | 222 V_FW_RI_RES_WR_FBMIN(2) |
223 V_FW_RI_RES_WR_FBMAX(3) | 223 V_FW_RI_RES_WR_FBMAX(2) |
224 V_FW_RI_RES_WR_CIDXFTHRESHO(0) | 224 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
225 V_FW_RI_RES_WR_CIDXFTHRESH(0) | 225 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
226 V_FW_RI_RES_WR_EQSIZE(eqsize)); 226 V_FW_RI_RES_WR_EQSIZE(eqsize));
@@ -243,7 +243,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
243 V_FW_RI_RES_WR_DCAEN(0) | 243 V_FW_RI_RES_WR_DCAEN(0) |
244 V_FW_RI_RES_WR_DCACPU(0) | 244 V_FW_RI_RES_WR_DCACPU(0) |
245 V_FW_RI_RES_WR_FBMIN(2) | 245 V_FW_RI_RES_WR_FBMIN(2) |
246 V_FW_RI_RES_WR_FBMAX(3) | 246 V_FW_RI_RES_WR_FBMAX(2) |
247 V_FW_RI_RES_WR_CIDXFTHRESHO(0) | 247 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
248 V_FW_RI_RES_WR_CIDXFTHRESH(0) | 248 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
249 V_FW_RI_RES_WR_EQSIZE(eqsize)); 249 V_FW_RI_RES_WR_EQSIZE(eqsize));
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8b606fd64022..08c194861af5 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2610,9 +2610,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2610 netif_carrier_on(nesvnic->netdev); 2610 netif_carrier_on(nesvnic->netdev);
2611 2611
2612 spin_lock(&nesvnic->port_ibevent_lock); 2612 spin_lock(&nesvnic->port_ibevent_lock);
2613 if (nesdev->iw_status == 0) { 2613 if (nesvnic->of_device_registered) {
2614 nesdev->iw_status = 1; 2614 if (nesdev->iw_status == 0) {
2615 nes_port_ibevent(nesvnic); 2615 nesdev->iw_status = 1;
2616 nes_port_ibevent(nesvnic);
2617 }
2616 } 2618 }
2617 spin_unlock(&nesvnic->port_ibevent_lock); 2619 spin_unlock(&nesvnic->port_ibevent_lock);
2618 } 2620 }
@@ -2642,9 +2644,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2642 netif_carrier_off(nesvnic->netdev); 2644 netif_carrier_off(nesvnic->netdev);
2643 2645
2644 spin_lock(&nesvnic->port_ibevent_lock); 2646 spin_lock(&nesvnic->port_ibevent_lock);
2645 if (nesdev->iw_status == 1) { 2647 if (nesvnic->of_device_registered) {
2646 nesdev->iw_status = 0; 2648 if (nesdev->iw_status == 1) {
2647 nes_port_ibevent(nesvnic); 2649 nesdev->iw_status = 0;
2650 nes_port_ibevent(nesvnic);
2651 }
2648 } 2652 }
2649 spin_unlock(&nesvnic->port_ibevent_lock); 2653 spin_unlock(&nesvnic->port_ibevent_lock);
2650 } 2654 }
@@ -2703,9 +2707,11 @@ void nes_recheck_link_status(struct work_struct *work)
2703 netif_carrier_on(nesvnic->netdev); 2707 netif_carrier_on(nesvnic->netdev);
2704 2708
2705 spin_lock(&nesvnic->port_ibevent_lock); 2709 spin_lock(&nesvnic->port_ibevent_lock);
2706 if (nesdev->iw_status == 0) { 2710 if (nesvnic->of_device_registered) {
2707 nesdev->iw_status = 1; 2711 if (nesdev->iw_status == 0) {
2708 nes_port_ibevent(nesvnic); 2712 nesdev->iw_status = 1;
2713 nes_port_ibevent(nesvnic);
2714 }
2709 } 2715 }
2710 spin_unlock(&nesvnic->port_ibevent_lock); 2716 spin_unlock(&nesvnic->port_ibevent_lock);
2711 } 2717 }
@@ -2723,9 +2729,11 @@ void nes_recheck_link_status(struct work_struct *work)
2723 netif_carrier_off(nesvnic->netdev); 2729 netif_carrier_off(nesvnic->netdev);
2724 2730
2725 spin_lock(&nesvnic->port_ibevent_lock); 2731 spin_lock(&nesvnic->port_ibevent_lock);
2726 if (nesdev->iw_status == 1) { 2732 if (nesvnic->of_device_registered) {
2727 nesdev->iw_status = 0; 2733 if (nesdev->iw_status == 1) {
2728 nes_port_ibevent(nesvnic); 2734 nesdev->iw_status = 0;
2735 nes_port_ibevent(nesvnic);
2736 }
2729 } 2737 }
2730 spin_unlock(&nesvnic->port_ibevent_lock); 2738 spin_unlock(&nesvnic->port_ibevent_lock);
2731 } 2739 }
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 50cceb3ab885..b01809a82cb0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -623,7 +623,6 @@ struct qib_chippport_specific {
623 u8 ibmalfusesnap; 623 u8 ibmalfusesnap;
624 struct qib_qsfp_data qsfp_data; 624 struct qib_qsfp_data qsfp_data;
625 char epmsgbuf[192]; /* for port error interrupt msg buffer */ 625 char epmsgbuf[192]; /* for port error interrupt msg buffer */
626 u8 bounced;
627}; 626};
628 627
629static struct { 628static struct {
@@ -1881,23 +1880,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1881 IB_PHYSPORTSTATE_DISABLED) 1880 IB_PHYSPORTSTATE_DISABLED)
1882 qib_set_ib_7322_lstate(ppd, 0, 1881 qib_set_ib_7322_lstate(ppd, 0,
1883 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); 1882 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1884 else { 1883 else
1885 u32 lstate;
1886 /*
1887 * We need the current logical link state before
1888 * lflags are set in handle_e_ibstatuschanged.
1889 */
1890 lstate = qib_7322_iblink_state(ibcs);
1891
1892 if (IS_QMH(dd) && !ppd->cpspec->bounced &&
1893 ltstate == IB_PHYSPORTSTATE_LINKUP &&
1894 (lstate >= IB_PORT_INIT &&
1895 lstate <= IB_PORT_ACTIVE)) {
1896 ppd->cpspec->bounced = 1;
1897 qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
1898 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
1899 }
1900
1901 /* 1884 /*
1902 * Since going into a recovery state causes the link 1885 * Since going into a recovery state causes the link
1903 * state to go down and since recovery is transitory, 1886 * state to go down and since recovery is transitory,
@@ -1911,7 +1894,6 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1911 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && 1894 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1912 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) 1895 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1913 qib_handle_e_ibstatuschanged(ppd, ibcs); 1896 qib_handle_e_ibstatuschanged(ppd, ibcs);
1914 }
1915 } 1897 }
1916 if (*msg && iserr) 1898 if (*msg && iserr)
1917 qib_dev_porterr(dd, ppd->port, "%s error\n", msg); 1899 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
@@ -2381,6 +2363,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2381 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); 2363 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2382 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); 2364 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2383 2365
2366 /* Hold the link state machine for mezz boards */
2367 if (IS_QMH(dd) || IS_QME(dd))
2368 qib_set_ib_7322_lstate(ppd, 0,
2369 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2370
2384 /* Also enable IBSTATUSCHG interrupt. */ 2371 /* Also enable IBSTATUSCHG interrupt. */
2385 val = qib_read_kreg_port(ppd, krp_errmask); 2372 val = qib_read_kreg_port(ppd, krp_errmask);
2386 qib_write_kreg_port(ppd, krp_errmask, 2373 qib_write_kreg_port(ppd, krp_errmask,
@@ -5702,6 +5689,11 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5702 ppd->cpspec->h1_val = h1; 5689 ppd->cpspec->h1_val = h1;
5703 /* now change the IBC and serdes, overriding generic */ 5690 /* now change the IBC and serdes, overriding generic */
5704 init_txdds_table(ppd, 1); 5691 init_txdds_table(ppd, 1);
5692 /* Re-enable the physical state machine on mezz boards
5693 * now that the correct settings have been set. */
5694 if (IS_QMH(dd) || IS_QME(dd))
5695 qib_set_ib_7322_lstate(ppd, 0,
5696 QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
5705 any++; 5697 any++;
5706 } 5698 }
5707 if (*nxt == '\n') 5699 if (*nxt == '\n')
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8245237b67ce..eca0c41f1226 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
1005 * there are still requests that haven't been acked. 1005 * there are still requests that haven't been acked.
1006 */ 1006 */
1007 if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && 1007 if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
1008 !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN))) 1008 !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
1009 (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
1009 start_timer(qp); 1010 start_timer(qp);
1010 1011
1011 while (qp->s_last != qp->s_acked) { 1012 while (qp->s_last != qp->s_acked) {
@@ -1439,6 +1440,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
1439 } 1440 }
1440 1441
1441 spin_lock_irqsave(&qp->s_lock, flags); 1442 spin_lock_irqsave(&qp->s_lock, flags);
1443 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
1444 goto ack_done;
1442 1445
1443 /* Ignore invalid responses. */ 1446 /* Ignore invalid responses. */
1444 if (qib_cmp24(psn, qp->s_next_psn) >= 0) 1447 if (qib_cmp24(psn, qp->s_next_psn) >= 0)
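
The qib_rc hunks above consult the per-state operations table before starting the retry timer or processing a response, so a QP that can no longer receive is left alone. Below is a simplified, purely illustrative table in the same style; the real ib_qib_state_ops[] has more states and bits:

#include <stdio.h>

#define PROCESS_RECV_OK 0x1
#define PROCESS_SEND_OK 0x2

enum qp_state { QP_RESET, QP_INIT, QP_RTR, QP_RTS, QP_ERROR, QP_NSTATES };

static const unsigned int state_ops[QP_NSTATES] = {
        [QP_RTR] = PROCESS_RECV_OK,
        [QP_RTS] = PROCESS_RECV_OK | PROCESS_SEND_OK,
};

static void maybe_start_timer(enum qp_state state)
{
        if (state_ops[state] & PROCESS_RECV_OK)
                printf("state %d: start retransmit timer\n", state);
        else
                printf("state %d: QP cannot receive, skip the timer\n", state);
}

int main(void)
{
        maybe_start_timer(QP_RTS);
        maybe_start_timer(QP_ERROR);
        return 0;
}
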
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 7985114beac7..11905b6a3023 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -75,7 +75,6 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz)
75 * dev->event_lock held and interrupts disabled. 75 * dev->event_lock held and interrupts disabled.
76 */ 76 */
77static void input_pass_event(struct input_dev *dev, 77static void input_pass_event(struct input_dev *dev,
78 struct input_handler *src_handler,
79 unsigned int type, unsigned int code, int value) 78 unsigned int type, unsigned int code, int value)
80{ 79{
81 struct input_handler *handler; 80 struct input_handler *handler;
@@ -94,15 +93,6 @@ static void input_pass_event(struct input_dev *dev,
94 continue; 93 continue;
95 94
96 handler = handle->handler; 95 handler = handle->handler;
97
98 /*
99 * If this is the handler that injected this
100 * particular event we want to skip it to avoid
101 * filters firing again and again.
102 */
103 if (handler == src_handler)
104 continue;
105
106 if (!handler->filter) { 96 if (!handler->filter) {
107 if (filtered) 97 if (filtered)
108 break; 98 break;
@@ -132,7 +122,7 @@ static void input_repeat_key(unsigned long data)
132 if (test_bit(dev->repeat_key, dev->key) && 122 if (test_bit(dev->repeat_key, dev->key) &&
133 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { 123 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
134 124
135 input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2); 125 input_pass_event(dev, EV_KEY, dev->repeat_key, 2);
136 126
137 if (dev->sync) { 127 if (dev->sync) {
138 /* 128 /*
@@ -141,7 +131,7 @@ static void input_repeat_key(unsigned long data)
141 * Otherwise assume that the driver will send 131 * Otherwise assume that the driver will send
142 * SYN_REPORT once it's done. 132 * SYN_REPORT once it's done.
143 */ 133 */
144 input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); 134 input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
145 } 135 }
146 136
147 if (dev->rep[REP_PERIOD]) 137 if (dev->rep[REP_PERIOD])
@@ -174,7 +164,6 @@ static void input_stop_autorepeat(struct input_dev *dev)
174#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) 164#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
175 165
176static int input_handle_abs_event(struct input_dev *dev, 166static int input_handle_abs_event(struct input_dev *dev,
177 struct input_handler *src_handler,
178 unsigned int code, int *pval) 167 unsigned int code, int *pval)
179{ 168{
180 bool is_mt_event; 169 bool is_mt_event;
@@ -218,15 +207,13 @@ static int input_handle_abs_event(struct input_dev *dev,
218 /* Flush pending "slot" event */ 207 /* Flush pending "slot" event */
219 if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { 208 if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
220 input_abs_set_val(dev, ABS_MT_SLOT, dev->slot); 209 input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
221 input_pass_event(dev, src_handler, 210 input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot);
222 EV_ABS, ABS_MT_SLOT, dev->slot);
223 } 211 }
224 212
225 return INPUT_PASS_TO_HANDLERS; 213 return INPUT_PASS_TO_HANDLERS;
226} 214}
227 215
228static void input_handle_event(struct input_dev *dev, 216static void input_handle_event(struct input_dev *dev,
229 struct input_handler *src_handler,
230 unsigned int type, unsigned int code, int value) 217 unsigned int type, unsigned int code, int value)
231{ 218{
232 int disposition = INPUT_IGNORE_EVENT; 219 int disposition = INPUT_IGNORE_EVENT;
@@ -279,8 +266,7 @@ static void input_handle_event(struct input_dev *dev,
279 266
280 case EV_ABS: 267 case EV_ABS:
281 if (is_event_supported(code, dev->absbit, ABS_MAX)) 268 if (is_event_supported(code, dev->absbit, ABS_MAX))
282 disposition = input_handle_abs_event(dev, src_handler, 269 disposition = input_handle_abs_event(dev, code, &value);
283 code, &value);
284 270
285 break; 271 break;
286 272
@@ -338,7 +324,7 @@ static void input_handle_event(struct input_dev *dev,
338 dev->event(dev, type, code, value); 324 dev->event(dev, type, code, value);
339 325
340 if (disposition & INPUT_PASS_TO_HANDLERS) 326 if (disposition & INPUT_PASS_TO_HANDLERS)
341 input_pass_event(dev, src_handler, type, code, value); 327 input_pass_event(dev, type, code, value);
342} 328}
343 329
344/** 330/**
@@ -367,7 +353,7 @@ void input_event(struct input_dev *dev,
367 353
368 spin_lock_irqsave(&dev->event_lock, flags); 354 spin_lock_irqsave(&dev->event_lock, flags);
369 add_input_randomness(type, code, value); 355 add_input_randomness(type, code, value);
370 input_handle_event(dev, NULL, type, code, value); 356 input_handle_event(dev, type, code, value);
371 spin_unlock_irqrestore(&dev->event_lock, flags); 357 spin_unlock_irqrestore(&dev->event_lock, flags);
372 } 358 }
373} 359}
@@ -397,8 +383,7 @@ void input_inject_event(struct input_handle *handle,
397 rcu_read_lock(); 383 rcu_read_lock();
398 grab = rcu_dereference(dev->grab); 384 grab = rcu_dereference(dev->grab);
399 if (!grab || grab == handle) 385 if (!grab || grab == handle)
400 input_handle_event(dev, handle->handler, 386 input_handle_event(dev, type, code, value);
401 type, code, value);
402 rcu_read_unlock(); 387 rcu_read_unlock();
403 388
404 spin_unlock_irqrestore(&dev->event_lock, flags); 389 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -611,10 +596,10 @@ static void input_dev_release_keys(struct input_dev *dev)
611 for (code = 0; code <= KEY_MAX; code++) { 596 for (code = 0; code <= KEY_MAX; code++) {
612 if (is_event_supported(code, dev->keybit, KEY_MAX) && 597 if (is_event_supported(code, dev->keybit, KEY_MAX) &&
613 __test_and_clear_bit(code, dev->key)) { 598 __test_and_clear_bit(code, dev->key)) {
614 input_pass_event(dev, NULL, EV_KEY, code, 0); 599 input_pass_event(dev, EV_KEY, code, 0);
615 } 600 }
616 } 601 }
617 input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); 602 input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
618 } 603 }
619} 604}
620 605
@@ -889,9 +874,9 @@ int input_set_keycode(struct input_dev *dev,
889 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && 874 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
890 __test_and_clear_bit(old_keycode, dev->key)) { 875 __test_and_clear_bit(old_keycode, dev->key)) {
891 876
892 input_pass_event(dev, NULL, EV_KEY, old_keycode, 0); 877 input_pass_event(dev, EV_KEY, old_keycode, 0);
893 if (dev->sync) 878 if (dev->sync)
894 input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); 879 input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
895 } 880 }
896 881
897 out: 882 out:
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 9dfd6e5f786f..1f38302a5951 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -69,11 +69,7 @@ static int ixp4xx_spkr_event(struct input_dev *dev, unsigned int type, unsigned
69 } 69 }
70 70
71 if (value > 20 && value < 32767) 71 if (value > 20 && value < 32767)
72#ifndef FREQ 72 count = (IXP4XX_TIMER_FREQ / (value * 4)) - 1;
73 count = (ixp4xx_get_board_tick_rate() / (value * 4)) - 1;
74#else
75 count = (FREQ / (value * 4)) - 1;
76#endif
77 73
78 ixp4xx_spkr_control(pin, count); 74 ixp4xx_spkr_control(pin, count);
79 75
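
The beeper change above computes the timer reload count directly from IXP4XX_TIMER_FREQ rather than a board-specific hook. The arithmetic is easy to check in isolation; the sketch below uses a hypothetical 66.666 MHz clock, which is only an assumption about the platform constant:

#include <stdio.h>

#define TIMER_FREQ 66666000UL   /* assumed stand-in for IXP4XX_TIMER_FREQ */

int main(void)
{
        const unsigned int tones[] = { 440, 1000, 4000 };

        for (unsigned int i = 0; i < sizeof(tones) / sizeof(tones[0]); i++) {
                unsigned int hz = tones[i];
                unsigned long count = 0;

                if (hz > 20 && hz < 32767)
                        count = TIMER_FREQ / (hz * 4) - 1;  /* same formula as the driver */
                printf("%u Hz -> reload count %lu\n", hz, count);
        }
        return 0;
}
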
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 1f8e0108962e..7e64d01da2be 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -176,7 +176,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
176 176
177 /* request the IRQs */ 177 /* request the IRQs */
178 err = request_irq(encoder->irq_a, &rotary_encoder_irq, 178 err = request_irq(encoder->irq_a, &rotary_encoder_irq,
179 IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, 179 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
180 DRV_NAME, encoder); 180 DRV_NAME, encoder);
181 if (err) { 181 if (err) {
182 dev_err(&pdev->dev, "unable to request IRQ %d\n", 182 dev_err(&pdev->dev, "unable to request IRQ %d\n",
@@ -185,7 +185,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
185 } 185 }
186 186
187 err = request_irq(encoder->irq_b, &rotary_encoder_irq, 187 err = request_irq(encoder->irq_b, &rotary_encoder_irq,
188 IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, 188 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
189 DRV_NAME, encoder); 189 DRV_NAME, encoder);
190 if (err) { 190 if (err) {
191 dev_err(&pdev->dev, "unable to request IRQ %d\n", 191 dev_err(&pdev->dev, "unable to request IRQ %d\n",
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index db5b0bca1a1a..7c38d1fbabf2 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -188,7 +188,8 @@ static void serio_free_event(struct serio_event *event)
188 kfree(event); 188 kfree(event);
189} 189}
190 190
191static void serio_remove_duplicate_events(struct serio_event *event) 191static void serio_remove_duplicate_events(void *object,
192 enum serio_event_type type)
192{ 193{
193 struct serio_event *e, *next; 194 struct serio_event *e, *next;
194 unsigned long flags; 195 unsigned long flags;
@@ -196,13 +197,13 @@ static void serio_remove_duplicate_events(struct serio_event *event)
196 spin_lock_irqsave(&serio_event_lock, flags); 197 spin_lock_irqsave(&serio_event_lock, flags);
197 198
198 list_for_each_entry_safe(e, next, &serio_event_list, node) { 199 list_for_each_entry_safe(e, next, &serio_event_list, node) {
199 if (event->object == e->object) { 200 if (object == e->object) {
200 /* 201 /*
201 * If this event is of different type we should not 202 * If this event is of different type we should not
202 * look further - we only suppress duplicate events 203 * look further - we only suppress duplicate events
203 * that were sent back-to-back. 204 * that were sent back-to-back.
204 */ 205 */
205 if (event->type != e->type) 206 if (type != e->type)
206 break; 207 break;
207 208
208 list_del_init(&e->node); 209 list_del_init(&e->node);
@@ -245,7 +246,7 @@ static void serio_handle_event(struct work_struct *work)
245 break; 246 break;
246 } 247 }
247 248
248 serio_remove_duplicate_events(event); 249 serio_remove_duplicate_events(event->object, event->type);
249 serio_free_event(event); 250 serio_free_event(event);
250 } 251 }
251 252
@@ -436,10 +437,12 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *
436 } else if (!strncmp(buf, "rescan", count)) { 437 } else if (!strncmp(buf, "rescan", count)) {
437 serio_disconnect_port(serio); 438 serio_disconnect_port(serio);
438 serio_find_driver(serio); 439 serio_find_driver(serio);
440 serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
439 } else if ((drv = driver_find(buf, &serio_bus)) != NULL) { 441 } else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
440 serio_disconnect_port(serio); 442 serio_disconnect_port(serio);
441 error = serio_bind_driver(serio, to_serio_driver(drv)); 443 error = serio_bind_driver(serio, to_serio_driver(drv));
442 put_driver(drv); 444 put_driver(drv);
445 serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
443 } else { 446 } else {
444 error = -EINVAL; 447 error = -EINVAL;
445 } 448 }
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index fc381498b798..cf8fb9f5d4a8 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -519,7 +519,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
519 /* Retrieve the physical and logical size for OEM devices */ 519 /* Retrieve the physical and logical size for OEM devices */
520 error = wacom_retrieve_hid_descriptor(intf, features); 520 error = wacom_retrieve_hid_descriptor(intf, features);
521 if (error) 521 if (error)
522 goto fail2; 522 goto fail3;
523 523
524 wacom_setup_device_quirks(features); 524 wacom_setup_device_quirks(features);
525 525
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 14ea54b78e46..4bf2316e3284 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -941,28 +941,29 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784
941 struct ads7846_platform_data *pdata = spi->dev.platform_data; 941 struct ads7846_platform_data *pdata = spi->dev.platform_data;
942 int err; 942 int err;
943 943
944 /* REVISIT when the irq can be triggered active-low, or if for some 944 /*
945 * REVISIT when the irq can be triggered active-low, or if for some
945 * reason the touchscreen isn't hooked up, we don't need to access 946 * reason the touchscreen isn't hooked up, we don't need to access
946 * the pendown state. 947 * the pendown state.
947 */ 948 */
948 if (!pdata->get_pendown_state && !gpio_is_valid(pdata->gpio_pendown)) {
949 dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
950 return -EINVAL;
951 }
952 949
953 if (pdata->get_pendown_state) { 950 if (pdata->get_pendown_state) {
954 ts->get_pendown_state = pdata->get_pendown_state; 951 ts->get_pendown_state = pdata->get_pendown_state;
955 return 0; 952 } else if (gpio_is_valid(pdata->gpio_pendown)) {
956 }
957 953
958 err = gpio_request(pdata->gpio_pendown, "ads7846_pendown"); 954 err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
959 if (err) { 955 if (err) {
960 dev_err(&spi->dev, "failed to request pendown GPIO%d\n", 956 dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
961 pdata->gpio_pendown); 957 pdata->gpio_pendown);
962 return err; 958 return err;
963 } 959 }
964 960
965 ts->gpio_pendown = pdata->gpio_pendown; 961 ts->gpio_pendown = pdata->gpio_pendown;
962
963 } else {
964 dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
965 return -EINVAL;
966 }
966 967
967 return 0; 968 return 0;
968} 969}
@@ -1353,7 +1354,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
1353 err_put_regulator: 1354 err_put_regulator:
1354 regulator_put(ts->reg); 1355 regulator_put(ts->reg);
1355 err_free_gpio: 1356 err_free_gpio:
1356 if (ts->gpio_pendown != -1) 1357 if (!ts->get_pendown_state)
1357 gpio_free(ts->gpio_pendown); 1358 gpio_free(ts->gpio_pendown);
1358 err_cleanup_filter: 1359 err_cleanup_filter:
1359 if (ts->filter_cleanup) 1360 if (ts->filter_cleanup)
@@ -1383,8 +1384,13 @@ static int __devexit ads7846_remove(struct spi_device *spi)
1383 regulator_disable(ts->reg); 1384 regulator_disable(ts->reg);
1384 regulator_put(ts->reg); 1385 regulator_put(ts->reg);
1385 1386
1386 if (ts->gpio_pendown != -1) 1387 if (!ts->get_pendown_state) {
1388 /*
1389 * If we are not using specialized pendown method we must
1390 * have been relying on gpio we set up ourselves.
1391 */
1387 gpio_free(ts->gpio_pendown); 1392 gpio_free(ts->gpio_pendown);
1393 }
1388 1394
1389 if (ts->filter_cleanup) 1395 if (ts->filter_cleanup)
1390 ts->filter_cleanup(ts->filter_data); 1396 ts->filter_cleanup(ts->filter_data);
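
The ads7846 hunks above restructure pendown setup into a clear preference order (board callback, then GPIO, then error) and free the GPIO on removal only when no callback was in use. A compilable userspace sketch of that selection and teardown logic; all names are invented and unrelated to the real spi/gpio APIs:

#include <stdio.h>
#include <stdbool.h>

struct pdata {
        bool (*get_pendown_state)(void);
        int gpio_pendown;                 /* negative when not wired up */
};

struct ts {
        bool (*get_pendown_state)(void);
        int gpio_pendown;
};

static int setup_pendown(const struct pdata *pdata, struct ts *ts)
{
        if (pdata->get_pendown_state) {
                ts->get_pendown_state = pdata->get_pendown_state;
        } else if (pdata->gpio_pendown >= 0) {
                ts->gpio_pendown = pdata->gpio_pendown;   /* "request" the GPIO */
                printf("using pendown GPIO %d\n", ts->gpio_pendown);
        } else {
                fprintf(stderr, "no get_pendown_state nor gpio_pendown?\n");
                return -1;
        }
        return 0;
}

static void teardown_pendown(const struct ts *ts)
{
        /* Only the GPIO path acquired a resource that needs releasing. */
        if (!ts->get_pendown_state)
                printf("freeing pendown GPIO %d\n", ts->gpio_pendown);
}

static bool board_pendown(void) { return true; }

int main(void)
{
        struct pdata gpio_board = { .get_pendown_state = NULL, .gpio_pendown = 57 };
        struct pdata cb_board = { .get_pendown_state = board_pendown, .gpio_pendown = -1 };
        struct ts ts1 = { 0 }, ts2 = { 0 };

        if (setup_pendown(&gpio_board, &ts1) == 0)
                teardown_pendown(&ts1);
        if (setup_pendown(&cb_board, &ts2) == 0)
                teardown_pendown(&ts2);
        return 0;
}
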
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 5cb8449c909d..c14412ef4648 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -51,6 +51,10 @@ MODULE_LICENSE("GPL");
51#define W8001_PKTLEN_TPCCTL 11 /* control packet */ 51#define W8001_PKTLEN_TPCCTL 11 /* control packet */
52#define W8001_PKTLEN_TOUCH2FG 13 52#define W8001_PKTLEN_TOUCH2FG 13
53 53
54/* resolution in points/mm */
55#define W8001_PEN_RESOLUTION 100
56#define W8001_TOUCH_RESOLUTION 10
57
54struct w8001_coord { 58struct w8001_coord {
55 u8 rdy; 59 u8 rdy;
56 u8 tsw; 60 u8 tsw;
@@ -198,7 +202,7 @@ static void parse_touchquery(u8 *data, struct w8001_touch_query *query)
198 query->y = 1024; 202 query->y = 1024;
199 if (query->panel_res) 203 if (query->panel_res)
200 query->x = query->y = (1 << query->panel_res); 204 query->x = query->y = (1 << query->panel_res);
201 query->panel_res = 10; 205 query->panel_res = W8001_TOUCH_RESOLUTION;
202 } 206 }
203} 207}
204 208
@@ -394,6 +398,8 @@ static int w8001_setup(struct w8001 *w8001)
394 398
395 input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0); 399 input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0);
396 input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0); 400 input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0);
401 input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION);
402 input_abs_set_res(dev, ABS_Y, W8001_PEN_RESOLUTION);
397 input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0); 403 input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0);
398 if (coord.tilt_x && coord.tilt_y) { 404 if (coord.tilt_x && coord.tilt_y) {
399 input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0); 405 input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0);
@@ -418,14 +424,17 @@ static int w8001_setup(struct w8001 *w8001)
418 w8001->max_touch_x = touch.x; 424 w8001->max_touch_x = touch.x;
419 w8001->max_touch_y = touch.y; 425 w8001->max_touch_y = touch.y;
420 426
421 /* scale to pen maximum */
422 if (w8001->max_pen_x && w8001->max_pen_y) { 427 if (w8001->max_pen_x && w8001->max_pen_y) {
428 /* if pen is supported scale to pen maximum */
423 touch.x = w8001->max_pen_x; 429 touch.x = w8001->max_pen_x;
424 touch.y = w8001->max_pen_y; 430 touch.y = w8001->max_pen_y;
431 touch.panel_res = W8001_PEN_RESOLUTION;
425 } 432 }
426 433
427 input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0); 434 input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0);
428 input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0); 435 input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0);
436 input_abs_set_res(dev, ABS_X, touch.panel_res);
437 input_abs_set_res(dev, ABS_Y, touch.panel_res);
429 438
430 switch (touch.sensor_id) { 439 switch (touch.sensor_id) {
431 case 0: 440 case 0:
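
The w8001 hunks above advertise axis resolution in points per millimetre (100 for the pen, 10 for touch) and, when a pen is present, report touch in the pen's coordinate space so the pen resolution applies there too. A small sketch of the unit conversion, using invented coordinate values:

#include <stdio.h>

#define W8001_PEN_RESOLUTION   100      /* points per mm, from the patch */
#define W8001_TOUCH_RESOLUTION  10      /* points per mm, from the patch */

int main(void)
{
        int pen_x = 21648;              /* invented raw pen coordinate */
        int touch_x = 2165;             /* invented raw touch coordinate */

        printf("pen   x %5d counts -> %.1f mm\n",
               pen_x, (double)pen_x / W8001_PEN_RESOLUTION);
        printf("touch x %5d counts -> %.1f mm\n",
               touch_x, (double)touch_x / W8001_TOUCH_RESOLUTION);

        /* With a pen present the touch axis is reported in the pen's
         * coordinate space, so it advertises the pen resolution instead. */
        printf("touch scaled to pen space -> %.1f mm\n",
               (double)(touch_x * 10) / W8001_PEN_RESOLUTION);
        return 0;
}
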
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 0858791978d8..cfff0c41d298 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -1247,10 +1247,10 @@ static void
1247l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) 1247l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1248{ 1248{
1249 struct PStack *st = fi->userdata; 1249 struct PStack *st = fi->userdata;
1250 struct sk_buff *skb, *oskb; 1250 struct sk_buff *skb;
1251 struct Layer2 *l2 = &st->l2; 1251 struct Layer2 *l2 = &st->l2;
1252 u_char header[MAX_HEADER_LEN]; 1252 u_char header[MAX_HEADER_LEN];
1253 int i; 1253 int i, hdr_space_needed;
1254 int unsigned p1; 1254 int unsigned p1;
1255 u_long flags; 1255 u_long flags;
1256 1256
@@ -1261,6 +1261,16 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1261 if (!skb) 1261 if (!skb)
1262 return; 1262 return;
1263 1263
1264 hdr_space_needed = l2headersize(l2, 0);
1265 if (hdr_space_needed > skb_headroom(skb)) {
1266 struct sk_buff *orig_skb = skb;
1267
1268 skb = skb_realloc_headroom(skb, hdr_space_needed);
1269 if (!skb) {
1270 dev_kfree_skb(orig_skb);
1271 return;
1272 }
1273 }
1264 spin_lock_irqsave(&l2->lock, flags); 1274 spin_lock_irqsave(&l2->lock, flags);
1265 if(test_bit(FLG_MOD128, &l2->flag)) 1275 if(test_bit(FLG_MOD128, &l2->flag))
1266 p1 = (l2->vs - l2->va) % 128; 1276 p1 = (l2->vs - l2->va) % 128;
@@ -1285,19 +1295,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1285 l2->vs = (l2->vs + 1) % 8; 1295 l2->vs = (l2->vs + 1) % 8;
1286 } 1296 }
1287 spin_unlock_irqrestore(&l2->lock, flags); 1297 spin_unlock_irqrestore(&l2->lock, flags);
1288 p1 = skb->data - skb->head; 1298 memcpy(skb_push(skb, i), header, i);
1289 if (p1 >= i)
1290 memcpy(skb_push(skb, i), header, i);
1291 else {
1292 printk(KERN_WARNING
1293 "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
1294 oskb = skb;
1295 skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
1296 memcpy(skb_put(skb, i), header, i);
1297 skb_copy_from_linear_data(oskb,
1298 skb_put(skb, oskb->len), oskb->len);
1299 dev_kfree_skb(oskb);
1300 }
1301 st->l2.l2l1(st, PH_PULL | INDICATION, skb); 1299 st->l2.l2l1(st, PH_PULL | INDICATION, skb);
1302 test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); 1300 test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
1303 if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { 1301 if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
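
The isdnl2 fix above reallocates the skb with enough headroom before pushing the layer-2 header, replacing the old copy-into-a-fresh-skb fallback (which could also dereference a NULL skb on allocation failure). Here is a userspace analogue of the ensure-headroom-then-push pattern, sketched with a plain buffer rather than real sk_buffs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
        unsigned char *head;    /* start of allocation */
        unsigned char *data;    /* start of payload */
        size_t len;             /* payload length */
};

static struct buf *realloc_headroom(struct buf *b, size_t need)
{
        if ((size_t)(b->data - b->head) >= need)
                return b;                       /* enough room already */

        struct buf *n = malloc(sizeof(*n));
        if (!n)
                return NULL;
        n->head = malloc(need + b->len);
        if (!n->head) {
                free(n);
                return NULL;
        }
        n->data = n->head + need;
        n->len = b->len;
        memcpy(n->data, b->data, b->len);
        free(b->head);
        free(b);
        return n;
}

static void push_header(struct buf *b, const void *hdr, size_t hlen)
{
        b->data -= hlen;                        /* like skb_push() */
        b->len += hlen;
        memcpy(b->data, hdr, hlen);
}

int main(void)
{
        struct buf *b = malloc(sizeof(*b));
        b->head = malloc(16);
        b->data = b->head + 1;                  /* only 1 byte of headroom */
        b->len = 5;
        memcpy(b->data, "HELLO", 5);

        unsigned char hdr[4] = { 0x00, 0x01, 0x02, 0x03 };
        b = realloc_headroom(b, sizeof(hdr));   /* grow headroom first */
        if (!b)
                return 1;
        push_header(b, hdr, sizeof(hdr));
        printf("frame length %zu\n", b->len);
        free(b->head);
        free(b);
        return 0;
}
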
diff --git a/drivers/isdn/hysdn/hysdn_defs.h b/drivers/isdn/hysdn/hysdn_defs.h
index 729df4089385..18b801ad97a4 100644
--- a/drivers/isdn/hysdn/hysdn_defs.h
+++ b/drivers/isdn/hysdn/hysdn_defs.h
@@ -227,7 +227,6 @@ extern hysdn_card *card_root; /* pointer to first card */
227/*************************/ 227/*************************/
228/* im/exported functions */ 228/* im/exported functions */
229/*************************/ 229/*************************/
230extern char *hysdn_getrev(const char *);
231 230
232/* hysdn_procconf.c */ 231/* hysdn_procconf.c */
233extern int hysdn_procconf_init(void); /* init proc config filesys */ 232extern int hysdn_procconf_init(void); /* init proc config filesys */
@@ -259,7 +258,6 @@ extern int hysdn_tx_cfgline(hysdn_card *, unsigned char *,
259 258
260/* hysdn_net.c */ 259/* hysdn_net.c */
261extern unsigned int hynet_enable; 260extern unsigned int hynet_enable;
262extern char *hysdn_net_revision;
263extern int hysdn_net_create(hysdn_card *); /* create a new net device */ 261extern int hysdn_net_create(hysdn_card *); /* create a new net device */
264extern int hysdn_net_release(hysdn_card *); /* delete the device */ 262extern int hysdn_net_release(hysdn_card *); /* delete the device */
265extern char *hysdn_net_getname(hysdn_card *); /* get name of net interface */ 263extern char *hysdn_net_getname(hysdn_card *); /* get name of net interface */
diff --git a/drivers/isdn/hysdn/hysdn_init.c b/drivers/isdn/hysdn/hysdn_init.c
index b7cc5c2f08c6..0ab42ace1692 100644
--- a/drivers/isdn/hysdn/hysdn_init.c
+++ b/drivers/isdn/hysdn/hysdn_init.c
@@ -36,7 +36,6 @@ MODULE_DESCRIPTION("ISDN4Linux: Driver for HYSDN cards");
36MODULE_AUTHOR("Werner Cornelius"); 36MODULE_AUTHOR("Werner Cornelius");
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
38 38
39static char *hysdn_init_revision = "$Revision: 1.6.6.6 $";
40static int cardmax; /* number of found cards */ 39static int cardmax; /* number of found cards */
41hysdn_card *card_root = NULL; /* pointer to first card */ 40hysdn_card *card_root = NULL; /* pointer to first card */
42static hysdn_card *card_last = NULL; /* pointer to first card */ 41static hysdn_card *card_last = NULL; /* pointer to first card */
@@ -49,25 +48,6 @@ static hysdn_card *card_last = NULL; /* pointer to first card */
49/* Additionally newer versions may be activated without rebooting. */ 48/* Additionally newer versions may be activated without rebooting. */
50/****************************************************************************/ 49/****************************************************************************/
51 50
52/******************************************************/
53/* extract revision number from string for log output */
54/******************************************************/
55char *
56hysdn_getrev(const char *revision)
57{
58 char *rev;
59 char *p;
60
61 if ((p = strchr(revision, ':'))) {
62 rev = p + 2;
63 p = strchr(rev, '$');
64 *--p = 0;
65 } else
66 rev = "???";
67 return rev;
68}
69
70
71/****************************************************************************/ 51/****************************************************************************/
72/* init_module is called once when the module is loaded to do all necessary */ 52/* init_module is called once when the module is loaded to do all necessary */
73/* things like autodetect... */ 53/* things like autodetect... */
@@ -175,13 +155,9 @@ static int hysdn_have_procfs;
175static int __init 155static int __init
176hysdn_init(void) 156hysdn_init(void)
177{ 157{
178 char tmp[50];
179 int rc; 158 int rc;
180 159
181 strcpy(tmp, hysdn_init_revision); 160 printk(KERN_NOTICE "HYSDN: module loaded\n");
182 printk(KERN_NOTICE "HYSDN: module Rev: %s loaded\n", hysdn_getrev(tmp));
183 strcpy(tmp, hysdn_net_revision);
184 printk(KERN_NOTICE "HYSDN: network interface Rev: %s \n", hysdn_getrev(tmp));
185 161
186 rc = pci_register_driver(&hysdn_pci_driver); 162 rc = pci_register_driver(&hysdn_pci_driver);
187 if (rc) 163 if (rc)
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index feec8d89d719..11f2cce26005 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -26,9 +26,6 @@
26unsigned int hynet_enable = 0xffffffff; 26unsigned int hynet_enable = 0xffffffff;
27module_param(hynet_enable, uint, 0); 27module_param(hynet_enable, uint, 0);
28 28
29/* store the actual version for log reporting */
30char *hysdn_net_revision = "$Revision: 1.8.6.4 $";
31
32#define MAX_SKB_BUFFERS 20 /* number of buffers for keeping TX-data */ 29#define MAX_SKB_BUFFERS 20 /* number of buffers for keeping TX-data */
33 30
34/****************************************************************************/ 31/****************************************************************************/
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 96b3e39c3356..5fe83bd42061 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -23,7 +23,6 @@
23#include "hysdn_defs.h" 23#include "hysdn_defs.h"
24 24
25static DEFINE_MUTEX(hysdn_conf_mutex); 25static DEFINE_MUTEX(hysdn_conf_mutex);
26static char *hysdn_procconf_revision = "$Revision: 1.8.6.4 $";
27 26
28#define INFO_OUT_LEN 80 /* length of info line including lf */ 27#define INFO_OUT_LEN 80 /* length of info line including lf */
29 28
@@ -404,7 +403,7 @@ hysdn_procconf_init(void)
404 card = card->next; /* next entry */ 403 card = card->next; /* next entry */
405 } 404 }
406 405
407 printk(KERN_NOTICE "HYSDN: procfs Rev. %s initialised\n", hysdn_getrev(hysdn_procconf_revision)); 406 printk(KERN_NOTICE "HYSDN: procfs initialised\n");
408 return (0); 407 return (0);
409} /* hysdn_procconf_init */ 408} /* hysdn_procconf_init */
410 409
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index f2b5bab5e6a1..1f355bb85e54 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1627,7 +1627,7 @@ __setup("icn=", icn_setup);
1627static int __init icn_init(void) 1627static int __init icn_init(void)
1628{ 1628{
1629 char *p; 1629 char *p;
1630 char rev[20]; 1630 char rev[21];
1631 1631
1632 memset(&dev, 0, sizeof(icn_dev)); 1632 memset(&dev, 0, sizeof(icn_dev));
1633 dev.memaddr = (membase & 0x0ffc000); 1633 dev.memaddr = (membase & 0x0ffc000);
@@ -1638,6 +1638,7 @@ static int __init icn_init(void)
1638 1638
1639 if ((p = strchr(revision, ':'))) { 1639 if ((p = strchr(revision, ':'))) {
1640 strncpy(rev, p + 1, 20); 1640 strncpy(rev, p + 1, 20);
1641 rev[20] = '\0';
1641 p = strchr(rev, '$'); 1642 p = strchr(rev, '$');
1642 if (p) 1643 if (p)
1643 *p = 0; 1644 *p = 0;
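
The icn change above grows the destination buffer by one byte and NUL-terminates it explicitly, because strncpy() does not terminate the destination when the source is at least as long as the count. A short demonstration of why the explicit terminator matters; the revision string here is invented:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *revision = "$Revision: 1.65 $ plus trailing text well past twenty chars";
        char rev[21];

        /* Copies exactly 20 bytes and does NOT add a NUL, so the extra
         * byte and explicit terminator from the patch are required. */
        strncpy(rev, strchr(revision, ':') + 1, 20);
        rev[20] = '\0';

        char *p = strchr(rev, '$');
        if (p)
                *p = '\0';
        printf("revision '%s'\n", rev);
        return 0;
}
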
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b76cfc89e1b5..0cc30ecda4c1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -287,6 +287,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
287 mddev_t *mddev = q->queuedata; 287 mddev_t *mddev = q->queuedata;
288 int rv; 288 int rv;
289 int cpu; 289 int cpu;
290 unsigned int sectors;
290 291
291 if (mddev == NULL || mddev->pers == NULL 292 if (mddev == NULL || mddev->pers == NULL
292 || !mddev->ready) { 293 || !mddev->ready) {
@@ -311,12 +312,16 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
311 atomic_inc(&mddev->active_io); 312 atomic_inc(&mddev->active_io);
312 rcu_read_unlock(); 313 rcu_read_unlock();
313 314
315 /*
316 * save the sectors now since our bio can
317 * go away inside make_request
318 */
319 sectors = bio_sectors(bio);
314 rv = mddev->pers->make_request(mddev, bio); 320 rv = mddev->pers->make_request(mddev, bio);
315 321
316 cpu = part_stat_lock(); 322 cpu = part_stat_lock();
317 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); 323 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
318 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], 324 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
319 bio_sectors(bio));
320 part_stat_unlock(); 325 part_stat_unlock();
321 326
322 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) 327 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
@@ -1947,8 +1952,6 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
1947 __bdevname(dev, b)); 1952 __bdevname(dev, b));
1948 return PTR_ERR(bdev); 1953 return PTR_ERR(bdev);
1949 } 1954 }
1950 if (!shared)
1951 set_bit(AllReserved, &rdev->flags);
1952 rdev->bdev = bdev; 1955 rdev->bdev = bdev;
1953 return err; 1956 return err;
1954} 1957}
@@ -2465,6 +2468,9 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2465 if (rdev->raid_disk != -1) 2468 if (rdev->raid_disk != -1)
2466 return -EBUSY; 2469 return -EBUSY;
2467 2470
2471 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2472 return -EBUSY;
2473
2468 if (rdev->mddev->pers->hot_add_disk == NULL) 2474 if (rdev->mddev->pers->hot_add_disk == NULL)
2469 return -EINVAL; 2475 return -EINVAL;
2470 2476
@@ -2610,12 +2616,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2610 2616
2611 mddev_lock(mddev); 2617 mddev_lock(mddev);
2612 list_for_each_entry(rdev2, &mddev->disks, same_set) 2618 list_for_each_entry(rdev2, &mddev->disks, same_set)
2613 if (test_bit(AllReserved, &rdev2->flags) || 2619 if (rdev->bdev == rdev2->bdev &&
2614 (rdev->bdev == rdev2->bdev && 2620 rdev != rdev2 &&
2615 rdev != rdev2 && 2621 overlaps(rdev->data_offset, rdev->sectors,
2616 overlaps(rdev->data_offset, rdev->sectors, 2622 rdev2->data_offset,
2617 rdev2->data_offset, 2623 rdev2->sectors)) {
2618 rdev2->sectors))) {
2619 overlap = 1; 2624 overlap = 1;
2620 break; 2625 break;
2621 } 2626 }
@@ -5578,6 +5583,8 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
5578 mddev->delta_disks = raid_disks - mddev->raid_disks; 5583 mddev->delta_disks = raid_disks - mddev->raid_disks;
5579 5584
5580 rv = mddev->pers->check_reshape(mddev); 5585 rv = mddev->pers->check_reshape(mddev);
5586 if (rv < 0)
5587 mddev->delta_disks = 0;
5581 return rv; 5588 return rv;
5582} 5589}
5583 5590
@@ -6985,9 +6992,6 @@ void md_do_sync(mddev_t *mddev)
6985 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 6992 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6986 mddev->resync_min = mddev->curr_resync_completed; 6993 mddev->resync_min = mddev->curr_resync_completed;
6987 mddev->curr_resync = 0; 6994 mddev->curr_resync = 0;
6988 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6989 mddev->curr_resync_completed = 0;
6990 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6991 wake_up(&resync_wait); 6995 wake_up(&resync_wait);
6992 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 6996 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
6993 md_wakeup_thread(mddev->thread); 6997 md_wakeup_thread(mddev->thread);
@@ -7028,7 +7032,7 @@ static int remove_and_add_spares(mddev_t *mddev)
7028 } 7032 }
7029 } 7033 }
7030 7034
7031 if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) { 7035 if (mddev->degraded && !mddev->recovery_disabled) {
7032 list_for_each_entry(rdev, &mddev->disks, same_set) { 7036 list_for_each_entry(rdev, &mddev->disks, same_set) {
7033 if (rdev->raid_disk >= 0 && 7037 if (rdev->raid_disk >= 0 &&
7034 !test_bit(In_sync, &rdev->flags) && 7038 !test_bit(In_sync, &rdev->flags) &&
@@ -7151,7 +7155,20 @@ void md_check_recovery(mddev_t *mddev)
7151 /* Only thing we do on a ro array is remove 7155 /* Only thing we do on a ro array is remove
7152 * failed devices. 7156 * failed devices.
7153 */ 7157 */
7154 remove_and_add_spares(mddev); 7158 mdk_rdev_t *rdev;
7159 list_for_each_entry(rdev, &mddev->disks, same_set)
7160 if (rdev->raid_disk >= 0 &&
7161 !test_bit(Blocked, &rdev->flags) &&
7162 test_bit(Faulty, &rdev->flags) &&
7163 atomic_read(&rdev->nr_pending)==0) {
7164 if (mddev->pers->hot_remove_disk(
7165 mddev, rdev->raid_disk)==0) {
7166 char nm[20];
7167 sprintf(nm,"rd%d", rdev->raid_disk);
7168 sysfs_remove_link(&mddev->kobj, nm);
7169 rdev->raid_disk = -1;
7170 }
7171 }
7155 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7172 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7156 goto unlock; 7173 goto unlock;
7157 } 7174 }
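
Among the md changes above, the I/O accounting fix captures bio_sectors(bio) before calling the personality's make_request(), because the bio may be completed and freed inside that call. The same capture-before-handing-over-ownership pattern in miniature, with a toy request type standing in for a bio:

#include <stdio.h>
#include <stdlib.h>

struct request {
        unsigned int sectors;
};

static unsigned long total_sectors;

/* The callee takes ownership and may free the request before returning,
 * just as a personality's make_request() may complete and free the bio. */
static void submit(struct request *rq)
{
        free(rq);
}

int main(void)
{
        struct request *rq = malloc(sizeof(*rq));
        rq->sectors = 8;

        unsigned int sectors = rq->sectors;     /* capture before ownership passes */
        submit(rq);
        total_sectors += sectors;               /* safe: no use-after-free */

        printf("accounted %lu sectors\n", total_sectors);
        return 0;
}
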
diff --git a/drivers/md/md.h b/drivers/md/md.h
index eec517ced31a..7e90b8593b2a 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -93,8 +93,6 @@ struct mdk_rdev_s
93#define Faulty 1 /* device is known to have a fault */ 93#define Faulty 1 /* device is known to have a fault */
94#define In_sync 2 /* device is in_sync with rest of array */ 94#define In_sync 2 /* device is in_sync with rest of array */
95#define WriteMostly 4 /* Avoid reading if at all possible */ 95#define WriteMostly 4 /* Avoid reading if at all possible */
96#define AllReserved 6 /* If whole device is reserved for
97 * one array */
98#define AutoDetected 7 /* added by auto-detect */ 96#define AutoDetected 7 /* added by auto-detect */
99#define Blocked 8 /* An error occured on an externally 97#define Blocked 8 /* An error occured on an externally
100 * managed array, don't allow writes 98 * managed array, don't allow writes
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a39f4c355e55..637a96855edb 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -179,6 +179,14 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
179 rdev1->new_raid_disk = j; 179 rdev1->new_raid_disk = j;
180 } 180 }
181 181
182 if (mddev->level == 1) {
183 /* taking over a raid1 array -
184 * we have only one active disk
185 */
186 j = 0;
187 rdev1->new_raid_disk = j;
188 }
189
182 if (j < 0 || j >= mddev->raid_disks) { 190 if (j < 0 || j >= mddev->raid_disks) {
183 printk(KERN_ERR "md/raid0:%s: bad disk number %d - " 191 printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
184 "aborting!\n", mdname(mddev), j); 192 "aborting!\n", mdname(mddev), j);
@@ -644,12 +652,38 @@ static void *raid0_takeover_raid10(mddev_t *mddev)
644 return priv_conf; 652 return priv_conf;
645} 653}
646 654
655static void *raid0_takeover_raid1(mddev_t *mddev)
656{
657 raid0_conf_t *priv_conf;
658
659 /* Check layout:
660 * - (N - 1) mirror drives must be already faulty
661 */
662 if ((mddev->raid_disks - 1) != mddev->degraded) {
663 printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
664 mdname(mddev));
665 return ERR_PTR(-EINVAL);
666 }
667
668 /* Set new parameters */
669 mddev->new_level = 0;
670 mddev->new_layout = 0;
671 mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
672 mddev->delta_disks = 1 - mddev->raid_disks;
673 /* make sure it will not be marked as dirty */
674 mddev->recovery_cp = MaxSector;
675
676 create_strip_zones(mddev, &priv_conf);
677 return priv_conf;
678}
679
647static void *raid0_takeover(mddev_t *mddev) 680static void *raid0_takeover(mddev_t *mddev)
648{ 681{
649 /* raid0 can take over: 682 /* raid0 can take over:
650 * raid4 - if all data disks are active. 683 * raid4 - if all data disks are active.
651 * raid5 - providing it is Raid4 layout and one disk is faulty 684 * raid5 - providing it is Raid4 layout and one disk is faulty
652 * raid10 - assuming we have all necessary active disks 685 * raid10 - assuming we have all necessary active disks
686 * raid1 - with (N - 1) mirror drives faulty
653 */ 687 */
654 if (mddev->level == 4) 688 if (mddev->level == 4)
655 return raid0_takeover_raid45(mddev); 689 return raid0_takeover_raid45(mddev);
@@ -665,6 +699,12 @@ static void *raid0_takeover(mddev_t *mddev)
665 if (mddev->level == 10) 699 if (mddev->level == 10)
666 return raid0_takeover_raid10(mddev); 700 return raid0_takeover_raid10(mddev);
667 701
702 if (mddev->level == 1)
703 return raid0_takeover_raid1(mddev);
704
705 printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
706 mddev->level);
707
668 return ERR_PTR(-EINVAL); 708 return ERR_PTR(-EINVAL);
669} 709}
670 710
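
The raid1 takeover path added above sets mddev->new_chunk_sectors = 128 with the comment "by default set chunk size to 64k"; md counts chunk sizes in 512-byte sectors, so the two statements agree. A quick worked check (plain userspace C, illustrative only):

    #include <assert.h>

    int main(void)
    {
            /* md stores chunk sizes in 512-byte sectors:
             * 128 sectors * 512 bytes/sector = 65536 bytes = 64 KiB. */
            assert(128 * 512 == 64 * 1024);
            return 0;
    }

The delta_disks assignment in the same hunk records that the array shrinks to a single member after takeover, since every other mirror was already faulty.
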
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 69b659544390..3b607b28741b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2463,11 +2463,13 @@ static void *raid10_takeover_raid0(mddev_t *mddev)
2463 mddev->recovery_cp = MaxSector; 2463 mddev->recovery_cp = MaxSector;
2464 2464
2465 conf = setup_conf(mddev); 2465 conf = setup_conf(mddev);
2466 if (!IS_ERR(conf)) 2466 if (!IS_ERR(conf)) {
2467 list_for_each_entry(rdev, &mddev->disks, same_set) 2467 list_for_each_entry(rdev, &mddev->disks, same_set)
2468 if (rdev->raid_disk >= 0) 2468 if (rdev->raid_disk >= 0)
2469 rdev->new_raid_disk = rdev->raid_disk * 2; 2469 rdev->new_raid_disk = rdev->raid_disk * 2;
2470 2470 conf->barrier = 1;
2471 }
2472
2471 return conf; 2473 return conf;
2472} 2474}
2473 2475
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5044babfcda0..702812824195 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5517,7 +5517,6 @@ static int raid5_start_reshape(mddev_t *mddev)
5517 raid5_conf_t *conf = mddev->private; 5517 raid5_conf_t *conf = mddev->private;
5518 mdk_rdev_t *rdev; 5518 mdk_rdev_t *rdev;
5519 int spares = 0; 5519 int spares = 0;
5520 int added_devices = 0;
5521 unsigned long flags; 5520 unsigned long flags;
5522 5521
5523 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5522 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
@@ -5527,8 +5526,8 @@ static int raid5_start_reshape(mddev_t *mddev)
5527 return -ENOSPC; 5526 return -ENOSPC;
5528 5527
5529 list_for_each_entry(rdev, &mddev->disks, same_set) 5528 list_for_each_entry(rdev, &mddev->disks, same_set)
5530 if ((rdev->raid_disk < 0 || rdev->raid_disk >= conf->raid_disks) 5529 if (!test_bit(In_sync, &rdev->flags)
5531 && !test_bit(Faulty, &rdev->flags)) 5530 && !test_bit(Faulty, &rdev->flags))
5532 spares++; 5531 spares++;
5533 5532
5534 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 5533 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
@@ -5571,34 +5570,35 @@ static int raid5_start_reshape(mddev_t *mddev)
5571 * to correctly record the "partially reconstructed" state of 5570 * to correctly record the "partially reconstructed" state of
5572 * such devices during the reshape and confusion could result. 5571 * such devices during the reshape and confusion could result.
5573 */ 5572 */
5574 if (mddev->delta_disks >= 0) 5573 if (mddev->delta_disks >= 0) {
5575 list_for_each_entry(rdev, &mddev->disks, same_set) 5574 int added_devices = 0;
5576 if (rdev->raid_disk < 0 && 5575 list_for_each_entry(rdev, &mddev->disks, same_set)
5577 !test_bit(Faulty, &rdev->flags)) { 5576 if (rdev->raid_disk < 0 &&
5578 if (raid5_add_disk(mddev, rdev) == 0) { 5577 !test_bit(Faulty, &rdev->flags)) {
5579 char nm[20]; 5578 if (raid5_add_disk(mddev, rdev) == 0) {
5580 if (rdev->raid_disk >= conf->previous_raid_disks) { 5579 char nm[20];
5581 set_bit(In_sync, &rdev->flags); 5580 if (rdev->raid_disk
5582 added_devices++; 5581 >= conf->previous_raid_disks) {
5583 } else 5582 set_bit(In_sync, &rdev->flags);
5584 rdev->recovery_offset = 0; 5583 added_devices++;
5585 sprintf(nm, "rd%d", rdev->raid_disk); 5584 } else
5586 if (sysfs_create_link(&mddev->kobj, 5585 rdev->recovery_offset = 0;
5587 &rdev->kobj, nm)) 5586 sprintf(nm, "rd%d", rdev->raid_disk);
5588 /* Failure here is OK */; 5587 if (sysfs_create_link(&mddev->kobj,
5589 } else 5588 &rdev->kobj, nm))
5590 break; 5589 /* Failure here is OK */;
5591 } else if (rdev->raid_disk >= conf->previous_raid_disks 5590 }
5592 && !test_bit(Faulty, &rdev->flags)) { 5591 } else if (rdev->raid_disk >= conf->previous_raid_disks
5593 /* This is a spare that was manually added */ 5592 && !test_bit(Faulty, &rdev->flags)) {
5594 set_bit(In_sync, &rdev->flags); 5593 /* This is a spare that was manually added */
5595 added_devices++; 5594 set_bit(In_sync, &rdev->flags);
5596 } 5595 added_devices++;
5596 }
5597 5597
5598 /* When a reshape changes the number of devices, ->degraded 5598 /* When a reshape changes the number of devices,
5599 * is measured against the larger of the pre and post number of 5599 * ->degraded is measured against the larger of the
5600 * devices.*/ 5600 * pre and post number of devices.
5601 if (mddev->delta_disks > 0) { 5601 */
5602 spin_lock_irqsave(&conf->device_lock, flags); 5602 spin_lock_irqsave(&conf->device_lock, flags);
5603 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) 5603 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
5604 - added_devices; 5604 - added_devices;
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index f011c5d9dea1..1c5cc65ea1e1 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -1,4 +1,4 @@
1/* ir-lirc-codec.c - ir-core to classic lirc interface bridge 1/* ir-lirc-codec.c - rc-core to classic lirc interface bridge
2 * 2 *
3 * Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com> 3 * Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com>
4 * 4 *
@@ -47,6 +47,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
47 /* Carrier reports */ 47 /* Carrier reports */
48 if (ev.carrier_report) { 48 if (ev.carrier_report) {
49 sample = LIRC_FREQUENCY(ev.carrier); 49 sample = LIRC_FREQUENCY(ev.carrier);
50 IR_dprintk(2, "carrier report (freq: %d)\n", sample);
50 51
51 /* Packet end */ 52 /* Packet end */
52 } else if (ev.timeout) { 53 } else if (ev.timeout) {
@@ -62,6 +63,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
62 return 0; 63 return 0;
63 64
64 sample = LIRC_TIMEOUT(ev.duration / 1000); 65 sample = LIRC_TIMEOUT(ev.duration / 1000);
66 IR_dprintk(2, "timeout report (duration: %d)\n", sample);
65 67
66 /* Normal sample */ 68 /* Normal sample */
67 } else { 69 } else {
@@ -85,6 +87,8 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
85 87
86 sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) : 88 sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) :
87 LIRC_SPACE(ev.duration / 1000); 89 LIRC_SPACE(ev.duration / 1000);
90 IR_dprintk(2, "delivering %uus %s to lirc_dev\n",
91 TO_US(ev.duration), TO_STR(ev.pulse));
88 } 92 }
89 93
90 lirc_buffer_write(dev->raw->lirc.drv->rbuf, 94 lirc_buffer_write(dev->raw->lirc.drv->rbuf,
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index 3bf3337875d1..2f5dc0622b94 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -3,6 +3,9 @@
3 * 3 *
4 * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com> 4 * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
5 * 5 *
6 * See http://mediacenterguides.com/book/export/html/31 for details on
7 * key mappings.
8 *
6 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or 11 * the Free Software Foundation; either version 2 of the License, or
@@ -60,6 +63,9 @@ static struct rc_map_table rc6_mce[] = {
60 { 0x800f0426, KEY_EPG }, /* Guide */ 63 { 0x800f0426, KEY_EPG }, /* Guide */
61 { 0x800f0427, KEY_ZOOM }, /* Aspect */ 64 { 0x800f0427, KEY_ZOOM }, /* Aspect */
62 65
66 { 0x800f0432, KEY_MODE }, /* Visualization */
67 { 0x800f0433, KEY_PRESENTATION }, /* Slide Show */
68 { 0x800f0434, KEY_EJECTCD },
63 { 0x800f043a, KEY_BRIGHTNESSUP }, 69 { 0x800f043a, KEY_BRIGHTNESSUP },
64 70
65 { 0x800f0446, KEY_TV }, 71 { 0x800f0446, KEY_TV },
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 079353e5d558..6df0a4980645 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -816,7 +816,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
816 switch (ir->buf_in[index]) { 816 switch (ir->buf_in[index]) {
817 /* 2-byte return value commands */ 817 /* 2-byte return value commands */
818 case MCE_CMD_S_TIMEOUT: 818 case MCE_CMD_S_TIMEOUT:
819 ir->rc->timeout = MS_TO_NS((hi << 8 | lo) / 2); 819 ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
820 break; 820 break;
821 821
822 /* 1-byte return value commands */ 822 /* 1-byte return value commands */
@@ -855,9 +855,10 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
855 break; 855 break;
856 case PARSE_IRDATA: 856 case PARSE_IRDATA:
857 ir->rem--; 857 ir->rem--;
858 init_ir_raw_event(&rawir);
858 rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0); 859 rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
859 rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK) 860 rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
860 * MS_TO_US(MCE_TIME_UNIT); 861 * US_TO_NS(MCE_TIME_UNIT);
861 862
862 dev_dbg(ir->dev, "Storing %s with duration %d\n", 863 dev_dbg(ir->dev, "Storing %s with duration %d\n",
863 rawir.pulse ? "pulse" : "space", 864 rawir.pulse ? "pulse" : "space",
@@ -883,6 +884,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
883 i, ir->rem + 1, false); 884 i, ir->rem + 1, false);
884 if (ir->rem) 885 if (ir->rem)
885 ir->parser_state = PARSE_IRDATA; 886 ir->parser_state = PARSE_IRDATA;
887 else
888 ir_raw_event_reset(ir->rc);
886 break; 889 break;
887 } 890 }
888 891
@@ -1060,7 +1063,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
1060 rc->priv = ir; 1063 rc->priv = ir;
1061 rc->driver_type = RC_DRIVER_IR_RAW; 1064 rc->driver_type = RC_DRIVER_IR_RAW;
1062 rc->allowed_protos = RC_TYPE_ALL; 1065 rc->allowed_protos = RC_TYPE_ALL;
1063 rc->timeout = MS_TO_NS(1000); 1066 rc->timeout = US_TO_NS(1000);
1064 if (!ir->flags.no_tx) { 1067 if (!ir->flags.no_tx) {
1065 rc->s_tx_mask = mceusb_set_tx_mask; 1068 rc->s_tx_mask = mceusb_set_tx_mask;
1066 rc->s_tx_carrier = mceusb_set_tx_carrier; 1069 rc->s_tx_carrier = mceusb_set_tx_carrier;
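
The mceusb hunks above, and the nuvoton-cir and streamzap hunks that follow, all swap millisecond-based conversions for microsecond ones: rc-core stores IR durations and timeouts in nanoseconds, while these devices report microsecond-scale units. A minimal sketch of the helpers being relied on (assumed to match the rc-core definitions of this era; check include/media/rc-core.h in the tree):

    /* Assumed rc-core helpers: everything handed to rc-core is in ns. */
    #define US_TO_NS(usec)  ((usec) * 1000)
    #define MS_TO_NS(msec)  ((msec) * 1000 * 1000)

    /* Why the timeout fix matters: a 1000 us receiver timeout is
     * US_TO_NS(1000) = 1,000,000 ns; the old MS_TO_NS(1000) produced
     * 1,000,000,000 ns -- a full second -- overstating it 1000x. */
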
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index dd4caf8ef80b..273d9d674792 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -460,7 +460,7 @@ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
460 return 0; 460 return 0;
461 } 461 }
462 462
463 carrier = (count * 1000000) / duration; 463 carrier = MS_TO_NS(count) / duration;
464 464
465 if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER)) 465 if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
466 nvt_dbg("WTF? Carrier frequency out of range!"); 466 nvt_dbg("WTF? Carrier frequency out of range!");
@@ -612,8 +612,8 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
612 sample = nvt->buf[i]; 612 sample = nvt->buf[i];
613 613
614 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); 614 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
615 rawir.duration = (sample & BUF_LEN_MASK) 615 rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
616 * SAMPLE_PERIOD * 1000; 616 * SAMPLE_PERIOD);
617 617
618 if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) { 618 if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
619 if (nvt->rawir.pulse == rawir.pulse) 619 if (nvt->rawir.pulse == rawir.pulse)
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 6e2911c2abfb..e435d94c0776 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -164,7 +164,7 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
164 sz->signal_start.tv_usec - 164 sz->signal_start.tv_usec -
165 sz->signal_last.tv_usec); 165 sz->signal_last.tv_usec);
166 rawir.duration -= sz->sum; 166 rawir.duration -= sz->sum;
167 rawir.duration *= 1000; 167 rawir.duration = US_TO_NS(rawir.duration);
168 rawir.duration &= IR_MAX_DURATION; 168 rawir.duration &= IR_MAX_DURATION;
169 } 169 }
170 sz_push(sz, rawir); 170 sz_push(sz, rawir);
@@ -177,7 +177,7 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
177 rawir.duration = ((int) value) * SZ_RESOLUTION; 177 rawir.duration = ((int) value) * SZ_RESOLUTION;
178 rawir.duration += SZ_RESOLUTION / 2; 178 rawir.duration += SZ_RESOLUTION / 2;
179 sz->sum += rawir.duration; 179 sz->sum += rawir.duration;
180 rawir.duration *= 1000; 180 rawir.duration = US_TO_NS(rawir.duration);
181 rawir.duration &= IR_MAX_DURATION; 181 rawir.duration &= IR_MAX_DURATION;
182 sz_push(sz, rawir); 182 sz_push(sz, rawir);
183} 183}
@@ -197,7 +197,7 @@ static void sz_push_full_space(struct streamzap_ir *sz,
197 rawir.duration = ((int) value) * SZ_RESOLUTION; 197 rawir.duration = ((int) value) * SZ_RESOLUTION;
198 rawir.duration += SZ_RESOLUTION / 2; 198 rawir.duration += SZ_RESOLUTION / 2;
199 sz->sum += rawir.duration; 199 sz->sum += rawir.duration;
200 rawir.duration *= 1000; 200 rawir.duration = US_TO_NS(rawir.duration);
201 sz_push(sz, rawir); 201 sz_push(sz, rawir);
202} 202}
203 203
@@ -273,6 +273,7 @@ static void streamzap_callback(struct urb *urb)
273 if (sz->timeout_enabled) 273 if (sz->timeout_enabled)
274 sz_push(sz, rawir); 274 sz_push(sz, rawir);
275 ir_raw_event_handle(sz->rdev); 275 ir_raw_event_handle(sz->rdev);
276 ir_raw_event_reset(sz->rdev);
276 } else { 277 } else {
277 sz_push_full_space(sz, sz->buf_in[i]); 278 sz_push_full_space(sz, sz->buf_in[i]);
278 } 279 }
@@ -290,6 +291,7 @@ static void streamzap_callback(struct urb *urb)
290 } 291 }
291 } 292 }
292 293
294 ir_raw_event_handle(sz->rdev);
293 usb_submit_urb(urb, GFP_ATOMIC); 295 usb_submit_urb(urb, GFP_ATOMIC);
294 296
295 return; 297 return;
@@ -430,13 +432,13 @@ static int __devinit streamzap_probe(struct usb_interface *intf,
430 sz->decoder_state = PulseSpace; 432 sz->decoder_state = PulseSpace;
431 /* FIXME: don't yet have a way to set this */ 433 /* FIXME: don't yet have a way to set this */
432 sz->timeout_enabled = true; 434 sz->timeout_enabled = true;
433 sz->rdev->timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) & 435 sz->rdev->timeout = ((US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION) &
434 IR_MAX_DURATION) | 0x03000000); 436 IR_MAX_DURATION) | 0x03000000);
435 #if 0 437 #if 0
436 /* not yet supported, depends on patches from maxim */ 438 /* not yet supported, depends on patches from maxim */
437 /* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */ 439 /* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
438 sz->min_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000; 440 sz->min_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
439 sz->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000; 441 sz->max_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
440 #endif 442 #endif
441 443
442 do_gettimeofday(&sz->signal_start); 444 do_gettimeofday(&sz->signal_start);
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 865216e9362c..47236a58bf33 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -5793,7 +5793,7 @@ static void usb_exchange(struct gspca_dev *gspca_dev,
5793 break; 5793 break;
5794 default: 5794 default:
5795/* case 0xdd: * delay */ 5795/* case 0xdd: * delay */
5796 msleep(action->val / 64 + 10); 5796 msleep(action->idx);
5797 break; 5797 break;
5798 } 5798 }
5799 action++; 5799 action++;
@@ -5830,7 +5830,7 @@ static void setmatrix(struct gspca_dev *gspca_dev)
5830 [SENSOR_GC0305] = gc0305_matrix, 5830 [SENSOR_GC0305] = gc0305_matrix,
5831 [SENSOR_HDCS2020b] = NULL, 5831 [SENSOR_HDCS2020b] = NULL,
5832 [SENSOR_HV7131B] = NULL, 5832 [SENSOR_HV7131B] = NULL,
5833 [SENSOR_HV7131R] = NULL, 5833 [SENSOR_HV7131R] = po2030_matrix,
5834 [SENSOR_ICM105A] = po2030_matrix, 5834 [SENSOR_ICM105A] = po2030_matrix,
5835 [SENSOR_MC501CB] = NULL, 5835 [SENSOR_MC501CB] = NULL,
5836 [SENSOR_MT9V111_1] = gc0305_matrix, 5836 [SENSOR_MT9V111_1] = gc0305_matrix,
@@ -5936,6 +5936,7 @@ static void setquality(struct gspca_dev *gspca_dev)
5936 case SENSOR_ADCM2700: 5936 case SENSOR_ADCM2700:
5937 case SENSOR_GC0305: 5937 case SENSOR_GC0305:
5938 case SENSOR_HV7131B: 5938 case SENSOR_HV7131B:
5939 case SENSOR_HV7131R:
5939 case SENSOR_OV7620: 5940 case SENSOR_OV7620:
5940 case SENSOR_PAS202B: 5941 case SENSOR_PAS202B:
5941 case SENSOR_PO2030: 5942 case SENSOR_PO2030:
@@ -6108,11 +6109,13 @@ static void send_unknown(struct gspca_dev *gspca_dev, int sensor)
6108 reg_w(gspca_dev, 0x02, 0x003b); 6109 reg_w(gspca_dev, 0x02, 0x003b);
6109 reg_w(gspca_dev, 0x00, 0x0038); 6110 reg_w(gspca_dev, 0x00, 0x0038);
6110 break; 6111 break;
6112 case SENSOR_HV7131R:
6111 case SENSOR_PAS202B: 6113 case SENSOR_PAS202B:
6112 reg_w(gspca_dev, 0x03, 0x003b); 6114 reg_w(gspca_dev, 0x03, 0x003b);
6113 reg_w(gspca_dev, 0x0c, 0x003a); 6115 reg_w(gspca_dev, 0x0c, 0x003a);
6114 reg_w(gspca_dev, 0x0b, 0x0039); 6116 reg_w(gspca_dev, 0x0b, 0x0039);
6115 reg_w(gspca_dev, 0x0b, 0x0038); 6117 if (sensor == SENSOR_PAS202B)
6118 reg_w(gspca_dev, 0x0b, 0x0038);
6116 break; 6119 break;
6117 } 6120 }
6118} 6121}
@@ -6704,10 +6707,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
6704 reg_w(gspca_dev, 0x02, 0x003b); 6707 reg_w(gspca_dev, 0x02, 0x003b);
6705 reg_w(gspca_dev, 0x00, 0x0038); 6708 reg_w(gspca_dev, 0x00, 0x0038);
6706 break; 6709 break;
6710 case SENSOR_HV7131R:
6707 case SENSOR_PAS202B: 6711 case SENSOR_PAS202B:
6708 reg_w(gspca_dev, 0x03, 0x003b); 6712 reg_w(gspca_dev, 0x03, 0x003b);
6709 reg_w(gspca_dev, 0x0c, 0x003a); 6713 reg_w(gspca_dev, 0x0c, 0x003a);
6710 reg_w(gspca_dev, 0x0b, 0x0039); 6714 reg_w(gspca_dev, 0x0b, 0x0039);
6715 if (sd->sensor == SENSOR_HV7131R)
6716 reg_w(gspca_dev, 0x50, ZC3XX_R11D_GLOBALGAIN);
6711 break; 6717 break;
6712 } 6718 }
6713 6719
@@ -6720,6 +6726,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
6720 break; 6726 break;
6721 case SENSOR_PAS202B: 6727 case SENSOR_PAS202B:
6722 case SENSOR_GC0305: 6728 case SENSOR_GC0305:
6729 case SENSOR_HV7131R:
6723 case SENSOR_TAS5130C: 6730 case SENSOR_TAS5130C:
6724 reg_r(gspca_dev, 0x0008); 6731 reg_r(gspca_dev, 0x0008);
6725 /* fall thru */ 6732 /* fall thru */
@@ -6760,6 +6767,12 @@ static int sd_start(struct gspca_dev *gspca_dev)
6760 /* ms-win + */ 6767 /* ms-win + */
6761 reg_w(gspca_dev, 0x40, 0x0117); 6768 reg_w(gspca_dev, 0x40, 0x0117);
6762 break; 6769 break;
6770 case SENSOR_HV7131R:
6771 i2c_write(gspca_dev, 0x25, 0x04, 0x00); /* exposure */
6772 i2c_write(gspca_dev, 0x26, 0x93, 0x00);
6773 i2c_write(gspca_dev, 0x27, 0xe0, 0x00);
6774 reg_w(gspca_dev, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN);
6775 break;
6763 case SENSOR_GC0305: 6776 case SENSOR_GC0305:
6764 case SENSOR_TAS5130C: 6777 case SENSOR_TAS5130C:
6765 reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */ 6778 reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */
@@ -6808,9 +6821,17 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
6808{ 6821{
6809 struct sd *sd = (struct sd *) gspca_dev; 6822 struct sd *sd = (struct sd *) gspca_dev;
6810 6823
6811 if (data[0] == 0xff && data[1] == 0xd8) { /* start of frame */ 6824 /* check the JPEG end of frame */
6825 if (len >= 3
6826 && data[len - 3] == 0xff && data[len - 2] == 0xd9) {
6827/*fixme: what does the last byte mean?*/
6812 gspca_frame_add(gspca_dev, LAST_PACKET, 6828 gspca_frame_add(gspca_dev, LAST_PACKET,
6813 NULL, 0); 6829 data, len - 1);
6830 return;
6831 }
6832
6833 /* check the JPEG start of a frame */
6834 if (data[0] == 0xff && data[1] == 0xd8) {
6814 /* put the JPEG header in the new frame */ 6835 /* put the JPEG header in the new frame */
6815 gspca_frame_add(gspca_dev, FIRST_PACKET, 6836 gspca_frame_add(gspca_dev, FIRST_PACKET,
6816 sd->jpeg_hdr, JPEG_HDR_SZ); 6837 sd->jpeg_hdr, JPEG_HDR_SZ);
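
The sd_pkt_scan() change above switches frame completion from keying on the next SOI marker to detecting the EOI marker at the end of a packet, so the final bytes of a frame are no longer dropped. A small sketch of the two marker tests involved (hypothetical helper names):

    /* JPEG stream markers: 0xff 0xd8 = SOI (start of image),
     * 0xff 0xd9 = EOI (end of image). */
    static int is_jpeg_soi(const unsigned char *d, int len)
    {
            return len >= 2 && d[0] == 0xff && d[1] == 0xd8;
    }

    static int is_jpeg_eoi(const unsigned char *d, int len)
    {
            /* the camera appends one extra byte after the EOI marker,
             * hence the len - 3 / len - 2 offsets in the patch */
            return len >= 3 && d[len - 3] == 0xff && d[len - 2] == 0xd9;
    }
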
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index a6572e5ae369..a27d93b503a5 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -283,6 +283,7 @@ static int hdpvr_probe(struct usb_interface *interface,
283 struct hdpvr_device *dev; 283 struct hdpvr_device *dev;
284 struct usb_host_interface *iface_desc; 284 struct usb_host_interface *iface_desc;
285 struct usb_endpoint_descriptor *endpoint; 285 struct usb_endpoint_descriptor *endpoint;
286 struct i2c_client *client;
286 size_t buffer_size; 287 size_t buffer_size;
287 int i; 288 int i;
288 int retval = -ENOMEM; 289 int retval = -ENOMEM;
@@ -381,13 +382,21 @@ static int hdpvr_probe(struct usb_interface *interface,
381#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 382#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
382 retval = hdpvr_register_i2c_adapter(dev); 383 retval = hdpvr_register_i2c_adapter(dev);
383 if (retval < 0) { 384 if (retval < 0) {
384 v4l2_err(&dev->v4l2_dev, "registering i2c adapter failed\n"); 385 v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n");
385 goto error; 386 goto error;
386 } 387 }
387 388
388 retval = hdpvr_register_i2c_ir(dev); 389 client = hdpvr_register_ir_rx_i2c(dev);
389 if (retval < 0) 390 if (!client) {
390 v4l2_err(&dev->v4l2_dev, "registering i2c IR devices failed\n"); 391 v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n");
392 goto reg_fail;
393 }
394
395 client = hdpvr_register_ir_tx_i2c(dev);
396 if (!client) {
397 v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n");
398 goto reg_fail;
399 }
391#endif 400#endif
392 401
393 /* let the user know what node this device is now attached to */ 402 /* let the user know what node this device is now attached to */
@@ -395,6 +404,10 @@ static int hdpvr_probe(struct usb_interface *interface,
395 video_device_node_name(dev->video_dev)); 404 video_device_node_name(dev->video_dev));
396 return 0; 405 return 0;
397 406
407reg_fail:
408#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
409 i2c_del_adapter(&dev->i2c_adapter);
410#endif
398error: 411error:
399 if (dev) { 412 if (dev) {
400 /* Destroy single thread */ 413 /* Destroy single thread */
@@ -424,6 +437,9 @@ static void hdpvr_disconnect(struct usb_interface *interface)
424 mutex_lock(&dev->io_mutex); 437 mutex_lock(&dev->io_mutex);
425 hdpvr_cancel_queue(dev); 438 hdpvr_cancel_queue(dev);
426 mutex_unlock(&dev->io_mutex); 439 mutex_unlock(&dev->io_mutex);
440#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
441 i2c_del_adapter(&dev->i2c_adapter);
442#endif
427 video_unregister_device(dev->video_dev); 443 video_unregister_device(dev->video_dev);
428 atomic_dec(&dev_nr); 444 atomic_dec(&dev_nr);
429} 445}
diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c
index 89b71faeaac2..e53fa55d56a1 100644
--- a/drivers/media/video/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/video/hdpvr/hdpvr-i2c.c
@@ -31,26 +31,34 @@
31#define Z8F0811_IR_RX_I2C_ADDR 0x71 31#define Z8F0811_IR_RX_I2C_ADDR 0x71
32 32
33 33
34static struct i2c_board_info hdpvr_i2c_board_info = { 34struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev)
35 I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR), 35{
36 I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR), 36 struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
37}; 37 struct i2c_board_info hdpvr_ir_tx_i2c_board_info = {
38 I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR),
39 };
40
41 init_data->name = "HD-PVR";
42 hdpvr_ir_tx_i2c_board_info.platform_data = init_data;
38 43
39int hdpvr_register_i2c_ir(struct hdpvr_device *dev) 44 return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_tx_i2c_board_info);
45}
46
47struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev)
40{ 48{
41 struct i2c_client *c;
42 struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data; 49 struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
50 struct i2c_board_info hdpvr_ir_rx_i2c_board_info = {
51 I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR),
52 };
43 53
44 /* Our default information for ir-kbd-i2c.c to use */ 54 /* Our default information for ir-kbd-i2c.c to use */
45 init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW; 55 init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
46 init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; 56 init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
47 init_data->type = RC_TYPE_RC5; 57 init_data->type = RC_TYPE_RC5;
48 init_data->name = "HD PVR"; 58 init_data->name = "HD-PVR";
49 hdpvr_i2c_board_info.platform_data = init_data; 59 hdpvr_ir_rx_i2c_board_info.platform_data = init_data;
50
51 c = i2c_new_device(&dev->i2c_adapter, &hdpvr_i2c_board_info);
52 60
53 return (c == NULL) ? -ENODEV : 0; 61 return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_rx_i2c_board_info);
54} 62}
55 63
56static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus, 64static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus,
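
The hdpvr-i2c.c rewrite above replaces a single i2c_board_info that listed two I2C_BOARD_INFO() initializers (the second simply overwrote the first, so only one client was ever described) with one board_info and one i2c_new_device() call per client, returning the struct i2c_client * so the probe path can check each registration. The general pattern, sketched with hypothetical names (i2c_new_device() was the registration API of this kernel generation):

    #include <linux/i2c.h>
    #include <linux/string.h>

    static struct i2c_client *register_one(struct i2c_adapter *adap,
                                           const char *type,
                                           unsigned short addr, void *pdata)
    {
            struct i2c_board_info info = {};

            /* one board_info per client: name, address, platform data */
            strlcpy(info.type, type, I2C_NAME_SIZE);
            info.addr = addr;
            info.platform_data = pdata;
            return i2c_new_device(adap, &info);   /* NULL on failure */
    }
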
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index ee74e3be9a6a..072f23c570f3 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -313,7 +313,8 @@ int hdpvr_cancel_queue(struct hdpvr_device *dev);
313/* i2c adapter registration */ 313/* i2c adapter registration */
314int hdpvr_register_i2c_adapter(struct hdpvr_device *dev); 314int hdpvr_register_i2c_adapter(struct hdpvr_device *dev);
315 315
316int hdpvr_register_i2c_ir(struct hdpvr_device *dev); 316struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev);
317struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev);
317 318
318/*========================================================================*/ 319/*========================================================================*/
319/* buffer management */ 320/* buffer management */
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index d2b20ad383a3..a221ad68b330 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -128,6 +128,19 @@ static int get_key_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
128 128
129static int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw) 129static int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
130{ 130{
131 int ret;
132 unsigned char buf[1] = { 0 };
133
134 /*
135 * This is the same apparent "are you ready?" poll command observed
136 * watching Windows driver traffic and implemented in lirc_zilog. With
137 * this added, we get far saner remote behavior with z8 chips on usb
138 * connected devices, even with the default polling interval of 100ms.
139 */
140 ret = i2c_master_send(ir->c, buf, 1);
141 if (ret != 1)
142 return (ret < 0) ? ret : -EINVAL;
143
131 return get_key_haup_common (ir, ir_key, ir_raw, 6, 3); 144 return get_key_haup_common (ir, ir_key, ir_raw, 6, 3);
132} 145}
133 146
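
The poll added to get_key_haup_xvr() above depends on the i2c_master_send() return convention: the number of bytes transferred on success, a negative errno on failure. Anything other than the one byte requested is treated as an error. A minimal sketch of that idiom (hypothetical helper):

    #include <linux/i2c.h>

    static int send_one_byte(struct i2c_client *c, char val)
    {
            int ret = i2c_master_send(c, &val, 1);

            /* 1 byte sent = success; map short writes to -EINVAL */
            if (ret != 1)
                    return ret < 0 ? ret : -EINVAL;
            return 0;
    }
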
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
index ccc884948f34..451ecd485f97 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -597,7 +597,6 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
597 init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; 597 init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
598 init_data->type = RC_TYPE_RC5; 598 init_data->type = RC_TYPE_RC5;
599 init_data->name = hdw->hdw_desc->description; 599 init_data->name = hdw->hdw_desc->description;
600 init_data->polling_interval = 260; /* ms From lirc_zilog */
601 /* IR Receiver */ 600 /* IR Receiver */
602 info.addr = 0x71; 601 info.addr = 0x71;
603 info.platform_data = init_data; 602 info.platform_data = init_data;
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index f35459d1f42f..0db90922ee93 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -1565,7 +1565,7 @@ static int saa711x_probe(struct i2c_client *client,
1565 chip_id = name[5]; 1565 chip_id = name[5];
1566 1566
1567 /* Check whether this chip is part of the saa711x series */ 1567 /* Check whether this chip is part of the saa711x series */
1568 if (memcmp(name, "1f711", 5)) { 1568 if (memcmp(name + 1, "f711", 4)) {
1569 v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n", 1569 v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n",
1570 client->addr << 1, name); 1570 client->addr << 1, name);
1571 return -ENODEV; 1571 return -ENODEV;
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index e9a3eab7b0cf..8c1d85e27be4 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -621,7 +621,7 @@ static int __init memstick_init(void)
621{ 621{
622 int rc; 622 int rc;
623 623
624 workqueue = create_freezeable_workqueue("kmemstick"); 624 workqueue = create_freezable_workqueue("kmemstick");
625 if (!workqueue) 625 if (!workqueue)
626 return -ENOMEM; 626 return -ENOMEM;
627 627
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f71f22948477..1735c84ff757 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR 76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
77#endif 77#endif
78 78
79#define MPT_LINUX_VERSION_COMMON "3.04.17" 79#define MPT_LINUX_VERSION_COMMON "3.04.18"
80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.17" 80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.18"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 81#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 82
83#define show_mptmod_ver(s,ver) \ 83#define show_mptmod_ver(s,ver) \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index a3856ed90aef..e8deb8ed0499 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -597,6 +597,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
597} 597}
598 598
599static int 599static int
600mptctl_release(struct inode *inode, struct file *filep)
601{
602 fasync_helper(-1, filep, 0, &async_queue);
603 return 0;
604}
605
606static int
600mptctl_fasync(int fd, struct file *filep, int mode) 607mptctl_fasync(int fd, struct file *filep, int mode)
601{ 608{
602 MPT_ADAPTER *ioc; 609 MPT_ADAPTER *ioc;
@@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = {
2815 .llseek = no_llseek, 2822 .llseek = no_llseek,
2816 .fasync = mptctl_fasync, 2823 .fasync = mptctl_fasync,
2817 .unlocked_ioctl = mptctl_ioctl, 2824 .unlocked_ioctl = mptctl_ioctl,
2825 .release = mptctl_release,
2818#ifdef CONFIG_COMPAT 2826#ifdef CONFIG_COMPAT
2819 .compat_ioctl = compat_mpctl_ioctl, 2827 .compat_ioctl = compat_mpctl_ioctl,
2820#endif 2828#endif
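
The mptctl change above adds a .release handler whose only job is to drop the file's fasync entry; without it, a process that requested SIGIO delivery and then closed the device left a dangling entry on async_queue. The full fasync lifecycle the fix completes, sketched with generic names:

    #include <linux/fs.h>

    static struct fasync_struct *async_queue;

    static int example_fasync(int fd, struct file *filp, int on)
    {
            /* called for F_SETFL/FASYNC: add or remove the entry */
            return fasync_helper(fd, filp, on, &async_queue);
    }

    static int example_release(struct inode *inode, struct file *filp)
    {
            /* drop any entry this file registered */
            fasync_helper(-1, filp, 0, &async_queue);
            return 0;
    }

    static void example_notify(void)
    {
            /* signal interested readers when data arrives */
            kill_fasync(&async_queue, SIGIO, POLL_IN);
    }
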
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 59b8f53d1ece..0d9b82a44540 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1873 } 1873 }
1874 1874
1875 out: 1875 out:
1876 printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", 1876 printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
1877 ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt); 1877 ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
1878 SCpnt, SCpnt->serial_number);
1878 1879
1879 return retval; 1880 return retval;
1880} 1881}
@@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1911 1912
1912 vdevice = SCpnt->device->hostdata; 1913 vdevice = SCpnt->device->hostdata;
1913 if (!vdevice || !vdevice->vtarget) { 1914 if (!vdevice || !vdevice->vtarget) {
1914 retval = SUCCESS; 1915 retval = 0;
1915 goto out; 1916 goto out;
1916 } 1917 }
1917 1918
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 5f6852dff40b..44d4475a09dd 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -329,7 +329,7 @@ static int __init tifm_init(void)
329{ 329{
330 int rc; 330 int rc;
331 331
332 workqueue = create_freezeable_workqueue("tifm"); 332 workqueue = create_freezable_workqueue("tifm");
333 if (!workqueue) 333 if (!workqueue)
334 return -ENOMEM; 334 return -ENOMEM;
335 335
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 4d2ea8e80140..6df5a55da110 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -785,7 +785,7 @@ static int __init vmballoon_init(void)
785 if (x86_hyper != &x86_hyper_vmware) 785 if (x86_hyper != &x86_hyper_vmware)
786 return -ENODEV; 786 return -ENODEV;
787 787
788 vmballoon_wq = create_freezeable_workqueue("vmmemctl"); 788 vmballoon_wq = create_freezable_workqueue("vmmemctl");
789 if (!vmballoon_wq) { 789 if (!vmballoon_wq) {
790 pr_err("failed to create workqueue\n"); 790 pr_err("failed to create workqueue\n");
791 return -ENOMEM; 791 return -ENOMEM;
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index d9d7efbc77cc..6322d1fb5d62 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
930 930
931 init_completion(&dev->dma_done); 931 init_completion(&dev->dma_done);
932 932
933 dev->card_workqueue = create_freezeable_workqueue(DRV_NAME); 933 dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
934 934
935 if (!dev->card_workqueue) 935 if (!dev->card_workqueue)
936 goto error9; 936 goto error9;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 67822cf6c025..ac0d6a8613b5 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
1258static __init int sm_module_init(void) 1258static __init int sm_module_init(void)
1259{ 1259{
1260 int error = 0; 1260 int error = 0;
1261 cache_flush_workqueue = create_freezeable_workqueue("smflush"); 1261 cache_flush_workqueue = create_freezable_workqueue("smflush");
1262 1262
1263 if (IS_ERR(cache_flush_workqueue)) 1263 if (IS_ERR(cache_flush_workqueue))
1264 return PTR_ERR(cache_flush_workqueue); 1264 return PTR_ERR(cache_flush_workqueue);
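
The memstick, tifm, vmw_balloon, r852, sm_ftl (and later mcp251x) hunks are all the same mechanical rename: the misspelled create_freezeable_workqueue() becomes create_freezable_workqueue(). A freezable workqueue is frozen during suspend/hibernation so its work items cannot race with the image write-out. Usage sketch with a hypothetical driver:

    #include <linux/init.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static int __init example_init(void)
    {
            /* frozen across suspend; queued work resumes after thaw */
            example_wq = create_freezable_workqueue("example");
            if (!example_wq)
                    return -ENOMEM;
            return 0;
    }

    static void __exit example_exit(void)
    {
            destroy_workqueue(example_wq);
    }
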
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index a699bbf20eb5..3824382faecc 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -48,6 +48,7 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
48 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)}, 48 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
49 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)}, 49 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
50 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)}, 50 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
51 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
51 /* required last entry */ 52 /* required last entry */
52 { 0 } 53 { 0 }
53}; 54};
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 0c7811faf72c..a179cc6d79f2 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1786,6 +1786,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1786 spin_lock_bh(&adapter->mcc_lock); 1786 spin_lock_bh(&adapter->mcc_lock);
1787 1787
1788 wrb = wrb_from_mccq(adapter); 1788 wrb = wrb_from_mccq(adapter);
1789 if (!wrb) {
1790 status = -EBUSY;
1791 goto err;
1792 }
1789 req = nonemb_cmd->va; 1793 req = nonemb_cmd->va;
1790 sge = nonembedded_sgl(wrb); 1794 sge = nonembedded_sgl(wrb);
1791 1795
@@ -1801,6 +1805,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1801 1805
1802 status = be_mcc_notify_wait(adapter); 1806 status = be_mcc_notify_wait(adapter);
1803 1807
1808err:
1804 spin_unlock_bh(&adapter->mcc_lock); 1809 spin_unlock_bh(&adapter->mcc_lock);
1805 return status; 1810 return status;
1806} 1811}
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index de40d3b7152f..28a32a6c8bf1 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -312,11 +312,9 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
312 if (adapter->link_up != link_up) { 312 if (adapter->link_up != link_up) {
313 adapter->link_speed = -1; 313 adapter->link_speed = -1;
314 if (link_up) { 314 if (link_up) {
315 netif_start_queue(netdev);
316 netif_carrier_on(netdev); 315 netif_carrier_on(netdev);
317 printk(KERN_INFO "%s: Link up\n", netdev->name); 316 printk(KERN_INFO "%s: Link up\n", netdev->name);
318 } else { 317 } else {
319 netif_stop_queue(netdev);
320 netif_carrier_off(netdev); 318 netif_carrier_off(netdev);
321 printk(KERN_INFO "%s: Link down\n", netdev->name); 319 printk(KERN_INFO "%s: Link down\n", netdev->name);
322 } 320 }
@@ -2628,8 +2626,6 @@ static void be_netdev_init(struct net_device *netdev)
2628 2626
2629 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, 2627 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2630 BE_NAPI_WEIGHT); 2628 BE_NAPI_WEIGHT);
2631
2632 netif_stop_queue(netdev);
2633} 2629}
2634 2630
2635static void be_unmap_pci_bars(struct be_adapter *adapter) 2631static void be_unmap_pci_bars(struct be_adapter *adapter)
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 8e4183717d91..653c62475cb6 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
22 * (you will need to reboot afterwards) */ 22 * (you will need to reboot afterwards) */
23/* #define BNX2X_STOP_ON_ERROR */ 23/* #define BNX2X_STOP_ON_ERROR */
24 24
25#define DRV_MODULE_VERSION "1.62.00-4" 25#define DRV_MODULE_VERSION "1.62.00-5"
26#define DRV_MODULE_RELDATE "2011/01/18" 26#define DRV_MODULE_RELDATE "2011/01/30"
27#define BNX2X_BC_VER 0x040200 27#define BNX2X_BC_VER 0x040200
28 28
29#define BNX2X_MULTI_QUEUE 29#define BNX2X_MULTI_QUEUE
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 7160ec51093e..dd1210fddfff 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -3948,48 +3948,6 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3948 return rc; 3948 return rc;
3949} 3949}
3950 3950
3951static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
3952 struct bnx2x_phy *phy)
3953{
3954 u16 val;
3955 bnx2x_cl45_read(bp, phy,
3956 MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
3957
3958 if (val == 0) {
3959 /* Mustn't set low power mode in 8073 A0 */
3960 return;
3961 }
3962
3963 /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
3964 bnx2x_cl45_read(bp, phy,
3965 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
3966 val &= ~(1<<13);
3967 bnx2x_cl45_write(bp, phy,
3968 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3969
3970 /* PLL controls */
3971 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
3972 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
3973 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
3974 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
3975 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
3976
3977 /* Tx Controls */
3978 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
3979 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
3980 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
3981
3982 /* Rx Controls */
3983 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
3984 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
3985 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
3986
3987 /* Enable PLL sequencer (use read-modify-write to set bit 13) */
3988 bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
3989 val |= (1<<13);
3990 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3991}
3992
3993/******************************************************************/ 3951/******************************************************************/
3994/* BCM8073 PHY SECTION */ 3952/* BCM8073 PHY SECTION */
3995/******************************************************************/ 3953/******************************************************************/
@@ -4148,8 +4106,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4148 4106
4149 bnx2x_8073_set_pause_cl37(params, phy, vars); 4107 bnx2x_8073_set_pause_cl37(params, phy, vars);
4150 4108
4151 bnx2x_8073_set_xaui_low_power_mode(bp, phy);
4152
4153 bnx2x_cl45_read(bp, phy, 4109 bnx2x_cl45_read(bp, phy,
4154 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); 4110 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
4155 4111
@@ -6519,6 +6475,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6519 MDIO_PMA_DEVAD, 6475 MDIO_PMA_DEVAD,
6520 MDIO_PMA_REG_8481_LED1_MASK, 6476 MDIO_PMA_REG_8481_LED1_MASK,
6521 0x80); 6477 0x80);
6478
6479 /* Tell LED3 to blink on source */
6480 bnx2x_cl45_read(bp, phy,
6481 MDIO_PMA_DEVAD,
6482 MDIO_PMA_REG_8481_LINK_SIGNAL,
6483 &val);
6484 val &= ~(7<<6);
6485 val |= (1<<6); /* A83B[8:6]= 1 */
6486 bnx2x_cl45_write(bp, phy,
6487 MDIO_PMA_DEVAD,
6488 MDIO_PMA_REG_8481_LINK_SIGNAL,
6489 val);
6522 } 6490 }
6523 break; 6491 break;
6524 } 6492 }
@@ -7720,10 +7688,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7720 struct bnx2x_phy phy[PORT_MAX]; 7688 struct bnx2x_phy phy[PORT_MAX];
7721 struct bnx2x_phy *phy_blk[PORT_MAX]; 7689 struct bnx2x_phy *phy_blk[PORT_MAX];
7722 u16 val; 7690 u16 val;
7723 s8 port; 7691 s8 port = 0;
7724 s8 port_of_path = 0; 7692 s8 port_of_path = 0;
7725 7693 u32 swap_val, swap_override;
7726 bnx2x_ext_phy_hw_reset(bp, 0); 7694 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7695 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7696 port ^= (swap_val && swap_override);
7697 bnx2x_ext_phy_hw_reset(bp, port);
7727 /* PART1 - Reset both phys */ 7698 /* PART1 - Reset both phys */
7728 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7699 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7729 u32 shmem_base, shmem2_base; 7700 u32 shmem_base, shmem2_base;
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 8cdcf5b39d1e..d584d32c747d 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -2301,15 +2301,10 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2301 /* accept matched ucast */ 2301 /* accept matched ucast */
2302 drop_all_ucast = 0; 2302 drop_all_ucast = 0;
2303 } 2303 }
2304 if (filters & BNX2X_ACCEPT_MULTICAST) { 2304 if (filters & BNX2X_ACCEPT_MULTICAST)
2305 /* accept matched mcast */ 2305 /* accept matched mcast */
2306 drop_all_mcast = 0; 2306 drop_all_mcast = 0;
2307 if (IS_MF_SI(bp)) 2307
2308 /* since mcast addresses won't arrive with ovlan,
2309 * fw needs to accept all of them in
2310 * switch-independent mode */
2311 accp_all_mcast = 1;
2312 }
2313 if (filters & BNX2X_ACCEPT_ALL_UNICAST) { 2308 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2314 /* accept all mcast */ 2309 /* accept all mcast */
2315 drop_all_ucast = 0; 2310 drop_all_ucast = 0;
@@ -4281,9 +4276,12 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4281 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | 4276 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4282 BNX2X_ACCEPT_MULTICAST; 4277 BNX2X_ACCEPT_MULTICAST;
4283#ifdef BCM_CNIC 4278#ifdef BCM_CNIC
4284 cl_id = bnx2x_fcoe(bp, cl_id); 4279 if (!NO_FCOE(bp)) {
4285 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | 4280 cl_id = bnx2x_fcoe(bp, cl_id);
4286 BNX2X_ACCEPT_MULTICAST); 4281 bnx2x_rxq_set_mac_filters(bp, cl_id,
4282 BNX2X_ACCEPT_UNICAST |
4283 BNX2X_ACCEPT_MULTICAST);
4284 }
4287#endif 4285#endif
4288 break; 4286 break;
4289 4287
@@ -4291,18 +4289,29 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4291 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | 4289 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4292 BNX2X_ACCEPT_ALL_MULTICAST; 4290 BNX2X_ACCEPT_ALL_MULTICAST;
4293#ifdef BCM_CNIC 4291#ifdef BCM_CNIC
4294 cl_id = bnx2x_fcoe(bp, cl_id); 4292 /*
4295 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | 4293 * Prevent duplication of multicast packets by configuring FCoE
4296 BNX2X_ACCEPT_MULTICAST); 4294 * L2 Client to receive only matched unicast frames.
4295 */
4296 if (!NO_FCOE(bp)) {
4297 cl_id = bnx2x_fcoe(bp, cl_id);
4298 bnx2x_rxq_set_mac_filters(bp, cl_id,
4299 BNX2X_ACCEPT_UNICAST);
4300 }
4297#endif 4301#endif
4298 break; 4302 break;
4299 4303
4300 case BNX2X_RX_MODE_PROMISC: 4304 case BNX2X_RX_MODE_PROMISC:
4301 def_q_filters |= BNX2X_PROMISCUOUS_MODE; 4305 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4302#ifdef BCM_CNIC 4306#ifdef BCM_CNIC
4303 cl_id = bnx2x_fcoe(bp, cl_id); 4307 /*
4304 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | 4308 * Prevent packets duplication by configuring DROP_ALL for FCoE
4305 BNX2X_ACCEPT_MULTICAST); 4309 * L2 Client.
4310 */
4311 if (!NO_FCOE(bp)) {
4312 cl_id = bnx2x_fcoe(bp, cl_id);
4313 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4314 }
4306#endif 4315#endif
4307 /* pass management unicast packets as well */ 4316 /* pass management unicast packets as well */
4308 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; 4317 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
@@ -5296,10 +5305,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5296 } 5305 }
5297 } 5306 }
5298 5307
5299 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5300 bp->common.shmem_base,
5301 bp->common.shmem2_base);
5302
5303 bnx2x_setup_fan_failure_detection(bp); 5308 bnx2x_setup_fan_failure_detection(bp);
5304 5309
5305 /* clear PXP2 attentions */ 5310 /* clear PXP2 attentions */
@@ -5503,9 +5508,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
5503 5508
5504 bnx2x_init_block(bp, MCP_BLOCK, init_stage); 5509 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5505 bnx2x_init_block(bp, DMAE_BLOCK, init_stage); 5510 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5506 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5507 bp->common.shmem_base,
5508 bp->common.shmem2_base);
5509 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base, 5511 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5510 bp->common.shmem2_base, port)) { 5512 bp->common.shmem2_base, port)) {
5511 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 5513 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -8379,6 +8381,17 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8379 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 8381 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8380 bp->mdio.prtad = 8382 bp->mdio.prtad =
8381 XGXS_EXT_PHY_ADDR(ext_phy_config); 8383 XGXS_EXT_PHY_ADDR(ext_phy_config);
8384
8385 /*
8386 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
8387 * In MF mode, it is set to cover self test cases
8388 */
8389 if (IS_MF(bp))
8390 bp->port.need_hw_lock = 1;
8391 else
8392 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8393 bp->common.shmem_base,
8394 bp->common.shmem2_base);
8382} 8395}
8383 8396
8384static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) 8397static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 986195eaa57c..5dec456fd4a4 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -23,7 +23,7 @@ config CAN_SLCAN
23 23
24 As only the sending and receiving of CAN frames is implemented, this 24 As only the sending and receiving of CAN frames is implemented, this
25 driver should work with the (serial/USB) CAN hardware from: 25 driver should work with the (serial/USB) CAN hardware from:
26 www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de 26 www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
27 27
28 Userspace tools to attach the SLCAN line discipline (slcan_attach, 28 Userspace tools to attach the SLCAN line discipline (slcan_attach,
29 slcand) can be found in the can-utils at the SocketCAN SVN, see 29 slcand) can be found in the can-utils at the SocketCAN SVN, see
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 2532b9631538..57d2ffbbb433 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1109,7 +1109,7 @@ static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
1109 return ret; 1109 return ret;
1110} 1110}
1111 1111
1112static DEVICE_ATTR(mb0_id, S_IWUGO | S_IRUGO, 1112static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
1113 at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id); 1113 at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
1114 1114
1115static struct attribute *at91_sysfs_attrs[] = { 1115static struct attribute *at91_sysfs_attrs[] = {
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index b9a6d7a5a739..366f5cc050ae 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1618,7 +1618,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
1618 return count; 1618 return count;
1619} 1619}
1620 1620
1621static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term, 1621static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
1622 ican3_sysfs_set_term); 1622 ican3_sysfs_set_term);
1623 1623
1624static struct attribute *ican3_sysfs_attrs[] = { 1624static struct attribute *ican3_sysfs_attrs[] = {
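
The at91_can and janz-ican3 hunks above tighten sysfs attribute permissions from S_IWUGO (write for user, group and other) to S_IWUSR (owner only); combined with S_IRUGO the mode drops from 0666 to 0644, removing world-writable device attributes. A worked check of the octal arithmetic (plain userspace C; S_IWUGO is spelled out because it is a kernel-only macro):

    #include <assert.h>
    #include <sys/stat.h>

    int main(void)
    {
            /* old: S_IWUGO | S_IRUGO == 0666 (world-writable) */
            assert((S_IWUSR | S_IWGRP | S_IWOTH |
                    S_IRUSR | S_IRGRP | S_IROTH) == 0666);
            /* new: S_IWUSR | S_IRUGO == 0644 (owner-write only) */
            assert((S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH) == 0644);
            return 0;
    }
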
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7ab534aee452..7513c4523ac4 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net)
940 goto open_unlock; 940 goto open_unlock;
941 } 941 }
942 942
943 priv->wq = create_freezeable_workqueue("mcp251x_wq"); 943 priv->wq = create_freezable_workqueue("mcp251x_wq");
944 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); 944 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
945 INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); 945 INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
946 946
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index 27d1d398e25e..d38706958af6 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
1config CAN_MSCAN 1config CAN_MSCAN
2 depends on CAN_DEV && (PPC || M68K || M68KNOMMU) 2 depends on CAN_DEV && (PPC || M68K)
3 tristate "Support for Freescale MSCAN based chips" 3 tristate "Support for Freescale MSCAN based chips"
4 ---help--- 4 ---help---
5 The Motorola Scalable Controller Area Network (MSCAN) definition 5 The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index c42e97268248..e54712b22c27 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -185,7 +185,7 @@ struct pch_can_priv {
185 185
186static struct can_bittiming_const pch_can_bittiming_const = { 186static struct can_bittiming_const pch_can_bittiming_const = {
187 .name = KBUILD_MODNAME, 187 .name = KBUILD_MODNAME,
188 .tseg1_min = 1, 188 .tseg1_min = 2,
189 .tseg1_max = 16, 189 .tseg1_max = 16,
190 .tseg2_min = 1, 190 .tseg2_min = 1,
191 .tseg2_max = 8, 191 .tseg2_max = 8,
@@ -959,13 +959,13 @@ static void __devexit pch_can_remove(struct pci_dev *pdev)
959 struct pch_can_priv *priv = netdev_priv(ndev); 959 struct pch_can_priv *priv = netdev_priv(ndev);
960 960
961 unregister_candev(priv->ndev); 961 unregister_candev(priv->ndev);
962 pci_iounmap(pdev, priv->regs);
963 if (priv->use_msi) 962 if (priv->use_msi)
964 pci_disable_msi(priv->dev); 963 pci_disable_msi(priv->dev);
965 pci_release_regions(pdev); 964 pci_release_regions(pdev);
966 pci_disable_device(pdev); 965 pci_disable_device(pdev);
967 pci_set_drvdata(pdev, NULL); 966 pci_set_drvdata(pdev, NULL);
968 pch_can_reset(priv); 967 pch_can_reset(priv);
968 pci_iounmap(pdev, priv->regs);
969 free_candev(priv->ndev); 969 free_candev(priv->ndev);
970} 970}
971 971
@@ -1238,6 +1238,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1238 priv->use_msi = 0; 1238 priv->use_msi = 0;
1239 } else { 1239 } else {
1240 netdev_err(ndev, "PCH CAN opened with MSI\n"); 1240 netdev_err(ndev, "PCH CAN opened with MSI\n");
1241 pci_set_master(pdev);
1241 priv->use_msi = 1; 1242 priv->use_msi = 1;
1242 } 1243 }
1243 1244
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
index 92bd6bdde5e3..5de46a9a77bb 100644
--- a/drivers/net/can/softing/Kconfig
+++ b/drivers/net/can/softing/Kconfig
@@ -1,6 +1,6 @@
1config CAN_SOFTING 1config CAN_SOFTING
2 tristate "Softing Gmbh CAN generic support" 2 tristate "Softing Gmbh CAN generic support"
3 depends on CAN_DEV 3 depends on CAN_DEV && HAS_IOMEM
4 ---help--- 4 ---help---
5 Support for CAN cards from Softing Gmbh & some cards 5 Support for CAN cards from Softing Gmbh & some cards
6 from Vector Gmbh. 6 from Vector Gmbh.
@@ -18,7 +18,7 @@ config CAN_SOFTING
18config CAN_SOFTING_CS 18config CAN_SOFTING_CS
19 tristate "Softing Gmbh CAN pcmcia cards" 19 tristate "Softing Gmbh CAN pcmcia cards"
20 depends on PCMCIA 20 depends on PCMCIA
21 select CAN_SOFTING 21 depends on CAN_SOFTING
22 ---help--- 22 ---help---
23 Support for PCMCIA cards from Softing Gmbh & some cards 23 Support for PCMCIA cards from Softing Gmbh & some cards
24 from Vector Gmbh. 24 from Vector Gmbh.
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index 300fe75dd1a7..c11bb4de8630 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/slab.h>
22 23
23#include <pcmcia/cistpl.h> 24#include <pcmcia/cistpl.h>
24#include <pcmcia/ds.h> 25#include <pcmcia/ds.h>
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 56166ae2059f..6aad64df4dcb 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
2040{ 2040{
2041 int i; 2041 int i;
2042 2042
2043 BUG_ON(adapter->debugfs_root == NULL); 2043 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2044 2044
2045 /* 2045 /*
2046 * Debugfs support is best effort. 2046 * Debugfs support is best effort.
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
2061 */ 2061 */
2062static void cleanup_debugfs(struct adapter *adapter) 2062static void cleanup_debugfs(struct adapter *adapter)
2063{ 2063{
2064 BUG_ON(adapter->debugfs_root == NULL); 2064 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2065 2065
2066 /* 2066 /*
2067 * Unlike our sister routine cleanup_proc(), we don't need to remove 2067 * Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2489 struct net_device *netdev; 2489 struct net_device *netdev;
2490 2490
2491 /* 2491 /*
2492 * Vet our module parameters.
2493 */
2494 if (msi != MSI_MSIX && msi != MSI_MSI) {
2495 dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
2496 " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
2497 MSI_MSI);
2498 err = -EINVAL;
2499 goto err_out;
2500 }
2501
2502 /*
2503 * Print our driver banner the first time we're called to initialize a 2492 * Print our driver banner the first time we're called to initialize a
2504 * device. 2493 * device.
2505 */ 2494 */
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2711 /* 2700 /*
2712 * Set up our debugfs entries. 2701 * Set up our debugfs entries.
2713 */ 2702 */
2714 if (cxgb4vf_debugfs_root) { 2703 if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
2715 adapter->debugfs_root = 2704 adapter->debugfs_root =
2716 debugfs_create_dir(pci_name(pdev), 2705 debugfs_create_dir(pci_name(pdev),
2717 cxgb4vf_debugfs_root); 2706 cxgb4vf_debugfs_root);
2718 if (adapter->debugfs_root == NULL) 2707 if (IS_ERR_OR_NULL(adapter->debugfs_root))
2719 dev_warn(&pdev->dev, "could not create debugfs" 2708 dev_warn(&pdev->dev, "could not create debugfs"
2720 " directory"); 2709 " directory");
2721 else 2710 else
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2770 */ 2759 */
2771 2760
2772err_free_debugfs: 2761err_free_debugfs:
2773 if (adapter->debugfs_root) { 2762 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2774 cleanup_debugfs(adapter); 2763 cleanup_debugfs(adapter);
2775 debugfs_remove_recursive(adapter->debugfs_root); 2764 debugfs_remove_recursive(adapter->debugfs_root);
2776 } 2765 }
@@ -2802,7 +2791,6 @@ err_release_regions:
2802err_disable_device: 2791err_disable_device:
2803 pci_disable_device(pdev); 2792 pci_disable_device(pdev);
2804 2793
2805err_out:
2806 return err; 2794 return err;
2807} 2795}
2808 2796
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
2840 /* 2828 /*
2841 * Tear down our debugfs entries. 2829 * Tear down our debugfs entries.
2842 */ 2830 */
2843 if (adapter->debugfs_root) { 2831 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2844 cleanup_debugfs(adapter); 2832 cleanup_debugfs(adapter);
2845 debugfs_remove_recursive(adapter->debugfs_root); 2833 debugfs_remove_recursive(adapter->debugfs_root);
2846 } 2834 }
@@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
2874} 2862}
2875 2863
2876/* 2864/*
2865 * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
2866 * delivery.
2867 */
2868static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
2869{
2870 struct adapter *adapter;
2871 int pidx;
2872
2873 adapter = pci_get_drvdata(pdev);
2874 if (!adapter)
2875 return;
2876
2877 /*
2878 * Disable all Virtual Interfaces. This will shut down the
2879 * delivery of all ingress packets into the chip for these
2880 * Virtual Interfaces.
2881 */
2882 for_each_port(adapter, pidx) {
2883 struct net_device *netdev;
2884 struct port_info *pi;
2885
2886 if (!test_bit(pidx, &adapter->registered_device_map))
2887 continue;
2888
2889 netdev = adapter->port[pidx];
2890 if (!netdev)
2891 continue;
2892
2893 pi = netdev_priv(netdev);
2894 t4vf_enable_vi(adapter, pi->viid, false, false);
2895 }
2896
2897 /*
2898 * Free up all Queues which will prevent further DMA and
2899 * Interrupts allowing various internal pathways to drain.
2900 */
2901 t4vf_free_sge_resources(adapter);
2902}
2903
2904/*
2877 * PCI Device registration data structures. 2905 * PCI Device registration data structures.
2878 */ 2906 */
2879#define CH_DEVICE(devid, idx) \ 2907#define CH_DEVICE(devid, idx) \
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = {
2906 .id_table = cxgb4vf_pci_tbl, 2934 .id_table = cxgb4vf_pci_tbl,
2907 .probe = cxgb4vf_pci_probe, 2935 .probe = cxgb4vf_pci_probe,
2908 .remove = __devexit_p(cxgb4vf_pci_remove), 2936 .remove = __devexit_p(cxgb4vf_pci_remove),
2937 .shutdown = __devexit_p(cxgb4vf_pci_shutdown),
2909}; 2938};
2910 2939
2911/* 2940/*
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void)
2915{ 2944{
2916 int ret; 2945 int ret;
2917 2946
2947 /*
2948 * Vet our module parameters.
2949 */
2950 if (msi != MSI_MSIX && msi != MSI_MSI) {
2951 printk(KERN_WARNING KBUILD_MODNAME
2952 ": bad module parameter msi=%d; must be %d"
2953 " (MSI-X or MSI) or %d (MSI)\n",
2954 msi, MSI_MSIX, MSI_MSI);
2955 return -EINVAL;
2956 }
2957
2918 /* Debugfs support is optional, just warn if this fails */ 2958 /* Debugfs support is optional, just warn if this fails */
2919 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); 2959 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
2920 if (!cxgb4vf_debugfs_root) 2960 if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
2921 printk(KERN_WARNING KBUILD_MODNAME ": could not create" 2961 printk(KERN_WARNING KBUILD_MODNAME ": could not create"
2922 " debugfs entry, continuing\n"); 2962 " debugfs entry, continuing\n");
2923 2963
2924 ret = pci_register_driver(&cxgb4vf_driver); 2964 ret = pci_register_driver(&cxgb4vf_driver);
2925 if (ret < 0) 2965 if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
2926 debugfs_remove(cxgb4vf_debugfs_root); 2966 debugfs_remove(cxgb4vf_debugfs_root);
2927 return ret; 2967 return ret;
2928} 2968}
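
Note on the cxgb4vf debugfs hunks above: every plain NULL test is switched to IS_ERR_OR_NULL(), because debugfs_create_dir() returns an ERR_PTR (not NULL) when debugfs is compiled out. A minimal sketch of the pattern, assuming a hypothetical driver; the "demo" names are illustrative, not from the patch:

    #include <linux/kernel.h>
    #include <linux/debugfs.h>
    #include <linux/err.h>

    /* Hypothetical per-driver debugfs root; illustrative only. */
    static struct dentry *demo_debugfs_root;

    static void demo_debugfs_init(void)
    {
        /* May return NULL on allocation failure, or an ERR_PTR when
         * debugfs is not built in, so test both cases at once. */
        demo_debugfs_root = debugfs_create_dir("demo", NULL);
        if (IS_ERR_OR_NULL(demo_debugfs_root))
            pr_warn("demo: debugfs unavailable, continuing without it\n");
    }

    static void demo_debugfs_exit(void)
    {
        if (!IS_ERR_OR_NULL(demo_debugfs_root))
            debugfs_remove_recursive(demo_debugfs_root);
    }

Debugfs stays best effort here, which matches the driver's own comment: failure to create the directory only produces a warning.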
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 0f51c80475ce..192db226ec7f 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
171 delay_idx = 0; 171 delay_idx = 0;
172 ms = delay[0]; 172 ms = delay[0];
173 173
174 for (i = 0; i < 500; i += ms) { 174 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
175 if (sleep_ok) { 175 if (sleep_ok) {
176 ms = delay[delay_idx]; 176 ms = delay[delay_idx];
177 if (delay_idx < ARRAY_SIZE(delay) - 1) 177 if (delay_idx < ARRAY_SIZE(delay) - 1)
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 1b48b68ad4fd..8b0084d17c8c 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1094,7 +1094,7 @@ static int depca_rx(struct net_device *dev)
1094 } 1094 }
1095 } 1095 }
1096 /* Change buffer ownership for this last frame, back to the adapter */ 1096 /* Change buffer ownership for this last frame, back to the adapter */
1097 for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) { 1097 for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
1098 writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base); 1098 writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
1099 } 1099 }
1100 writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base); 1100 writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
@@ -1103,7 +1103,7 @@ static int depca_rx(struct net_device *dev)
1103 /* 1103 /*
1104 ** Update entry information 1104 ** Update entry information
1105 */ 1105 */
1106 lp->rx_new = (++lp->rx_new) & lp->rxRingMask; 1106 lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
1107 } 1107 }
1108 1108
1109 return 0; 1109 return 0;
@@ -1148,7 +1148,7 @@ static int depca_tx(struct net_device *dev)
1148 } 1148 }
1149 1149
1150 /* Update all the pointers */ 1150 /* Update all the pointers */
1151 lp->tx_old = (++lp->tx_old) & lp->txRingMask; 1151 lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
1152 } 1152 }
1153 1153
1154 return 0; 1154 return 0;
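
The depca hunks above replace forms like lp->rx_old = (++lp->rx_old) & lp->rxRingMask with (lp->rx_old + 1) & mask. The original modifies and reads the same object without an intervening sequence point, which is undefined behaviour in C (and newer GCC warns about it). A standalone illustration with a made-up 16-entry ring mask:

    #include <stdio.h>

    #define RING_MASK 0x0f  /* illustrative 16-entry ring */

    int main(void)
    {
        unsigned int idx = 14;

        /* Undefined behaviour (the old form):
         *     idx = (++idx) & RING_MASK;
         * 'idx' is modified twice between sequence points. */

        /* Well-defined equivalent used by the fix: */
        idx = (idx + 1) & RING_MASK;  /* -> 15 */
        idx = (idx + 1) & RING_MASK;  /* wraps to 0 */

        printf("idx=%u\n", idx);
        return 0;
    }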
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index e1a8216ff692..c05db6046050 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev)
1753 1753
1754 /* Free all the skbuffs in the queue. */ 1754 /* Free all the skbuffs in the queue. */
1755 for (i = 0; i < RX_RING_SIZE; i++) { 1755 for (i = 0; i < RX_RING_SIZE; i++) {
1756 np->rx_ring[i].status = 0;
1757 np->rx_ring[i].fraginfo = 0;
1758 skb = np->rx_skbuff[i]; 1756 skb = np->rx_skbuff[i];
1759 if (skb) { 1757 if (skb) {
1760 pci_unmap_single(np->pdev, 1758 pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev)
1763 dev_kfree_skb (skb); 1761 dev_kfree_skb (skb);
1764 np->rx_skbuff[i] = NULL; 1762 np->rx_skbuff[i] = NULL;
1765 } 1763 }
1764 np->rx_ring[i].status = 0;
1765 np->rx_ring[i].fraginfo = 0;
1766 } 1766 }
1767 for (i = 0; i < TX_RING_SIZE; i++) { 1767 for (i = 0; i < TX_RING_SIZE; i++) {
1768 skb = np->tx_skbuff[i]; 1768 skb = np->tx_skbuff[i];
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index aed223b1b897..7501d977d992 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -124,6 +124,7 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
124 case M88E1000_I_PHY_ID: 124 case M88E1000_I_PHY_ID:
125 case M88E1011_I_PHY_ID: 125 case M88E1011_I_PHY_ID:
126 case M88E1111_I_PHY_ID: 126 case M88E1111_I_PHY_ID:
127 case M88E1118_E_PHY_ID:
127 hw->phy_type = e1000_phy_m88; 128 hw->phy_type = e1000_phy_m88;
128 break; 129 break;
129 case IGP01E1000_I_PHY_ID: 130 case IGP01E1000_I_PHY_ID:
@@ -3222,7 +3223,8 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3222 break; 3223 break;
3223 case e1000_ce4100: 3224 case e1000_ce4100:
3224 if ((hw->phy_id == RTL8211B_PHY_ID) || 3225 if ((hw->phy_id == RTL8211B_PHY_ID) ||
3225 (hw->phy_id == RTL8201N_PHY_ID)) 3226 (hw->phy_id == RTL8201N_PHY_ID) ||
3227 (hw->phy_id == M88E1118_E_PHY_ID))
3226 match = true; 3228 match = true;
3227 break; 3229 break;
3228 case e1000_82541: 3230 case e1000_82541:
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 196eeda2dd6c..c70b23d52284 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -2917,6 +2917,7 @@ struct e1000_host_command_info {
2917#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID 2917#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
2918#define M88E1011_I_REV_4 0x04 2918#define M88E1011_I_REV_4 0x04
2919#define M88E1111_I_PHY_ID 0x01410CC0 2919#define M88E1111_I_PHY_ID 0x01410CC0
2920#define M88E1118_E_PHY_ID 0x01410E40
2920#define L1LXT971A_PHY_ID 0x001378E0 2921#define L1LXT971A_PHY_ID 0x001378E0
2921 2922
2922#define RTL8211B_PHY_ID 0x001CC910 2923#define RTL8211B_PHY_ID 0x001CC910
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 1c18f26b0812..3fa110ddb041 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
937 u16 phy_status, phy_1000t_status, phy_ext_status; 937 u16 phy_status, phy_1000t_status, phy_ext_status;
938 u16 pci_status; 938 u16 pci_status;
939 939
940 if (test_bit(__E1000_DOWN, &adapter->state))
941 return;
942
940 e1e_rphy(hw, PHY_STATUS, &phy_status); 943 e1e_rphy(hw, PHY_STATUS, &phy_status);
941 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); 944 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
942 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); 945 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work)
1506 struct e1000_adapter *adapter = container_of(work, 1509 struct e1000_adapter *adapter = container_of(work,
1507 struct e1000_adapter, downshift_task); 1510 struct e1000_adapter, downshift_task);
1508 1511
1512 if (test_bit(__E1000_DOWN, &adapter->state))
1513 return;
1514
1509 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); 1515 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1510} 1516}
1511 1517
@@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter)
3338 return 0; 3344 return 0;
3339} 3345}
3340 3346
3347static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3348{
3349 struct e1000_hw *hw = &adapter->hw;
3350
3351 if (!(adapter->flags2 & FLAG2_DMA_BURST))
3352 return;
3353
3354 /* flush pending descriptor writebacks to memory */
3355 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3356 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3357
3358 /* execute the writes immediately */
3359 e1e_flush();
3360}
3361
3341void e1000e_down(struct e1000_adapter *adapter) 3362void e1000e_down(struct e1000_adapter *adapter)
3342{ 3363{
3343 struct net_device *netdev = adapter->netdev; 3364 struct net_device *netdev = adapter->netdev;
@@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter)
3377 3398
3378 if (!pci_channel_offline(adapter->pdev)) 3399 if (!pci_channel_offline(adapter->pdev))
3379 e1000e_reset(adapter); 3400 e1000e_reset(adapter);
3401
3402 e1000e_flush_descriptors(adapter);
3403
3380 e1000_clean_tx_ring(adapter); 3404 e1000_clean_tx_ring(adapter);
3381 e1000_clean_rx_ring(adapter); 3405 e1000_clean_rx_ring(adapter);
3382 3406
@@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
3765{ 3789{
3766 struct e1000_adapter *adapter = container_of(work, 3790 struct e1000_adapter *adapter = container_of(work,
3767 struct e1000_adapter, update_phy_task); 3791 struct e1000_adapter, update_phy_task);
3792
3793 if (test_bit(__E1000_DOWN, &adapter->state))
3794 return;
3795
3768 e1000_get_phy_info(&adapter->hw); 3796 e1000_get_phy_info(&adapter->hw);
3769} 3797}
3770 3798
@@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
3775static void e1000_update_phy_info(unsigned long data) 3803static void e1000_update_phy_info(unsigned long data)
3776{ 3804{
3777 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 3805 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3806
3807 if (test_bit(__E1000_DOWN, &adapter->state))
3808 return;
3809
3778 schedule_work(&adapter->update_phy_task); 3810 schedule_work(&adapter->update_phy_task);
3779} 3811}
3780 3812
@@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work)
4149 u32 link, tctl; 4181 u32 link, tctl;
4150 int tx_pending = 0; 4182 int tx_pending = 0;
4151 4183
4184 if (test_bit(__E1000_DOWN, &adapter->state))
4185 return;
4186
4152 link = e1000e_has_link(adapter); 4187 link = e1000e_has_link(adapter);
4153 if ((netif_carrier_ok(netdev)) && link) { 4188 if ((netif_carrier_ok(netdev)) && link) {
4154 /* Cancel scheduled suspend requests. */ 4189 /* Cancel scheduled suspend requests. */
@@ -4309,7 +4344,6 @@ link_up:
4309 * to get done, so reset controller to flush Tx. 4344 * to get done, so reset controller to flush Tx.
4310 * (Do the reset outside of interrupt context). 4345 * (Do the reset outside of interrupt context).
4311 */ 4346 */
4312 adapter->tx_timeout_count++;
4313 schedule_work(&adapter->reset_task); 4347 schedule_work(&adapter->reset_task);
4314 /* return immediately since reset is imminent */ 4348 /* return immediately since reset is imminent */
4315 return; 4349 return;
@@ -4338,19 +4372,12 @@ link_up:
4338 else 4372 else
4339 ew32(ICS, E1000_ICS_RXDMT0); 4373 ew32(ICS, E1000_ICS_RXDMT0);
4340 4374
4375 /* flush pending descriptors to memory before detecting Tx hang */
4376 e1000e_flush_descriptors(adapter);
4377
4341 /* Force detection of hung controller every watchdog period */ 4378 /* Force detection of hung controller every watchdog period */
4342 adapter->detect_tx_hung = 1; 4379 adapter->detect_tx_hung = 1;
4343 4380
4344 /* flush partial descriptors to memory before detecting Tx hang */
4345 if (adapter->flags2 & FLAG2_DMA_BURST) {
4346 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4347 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
4348 /*
4349 * no need to flush the writes because the timeout code does
4350 * an er32 first thing
4351 */
4352 }
4353
4354 /* 4381 /*
4355 * With 82571 controllers, LAA may be overwritten due to controller 4382 * With 82571 controllers, LAA may be overwritten due to controller
4356 * reset from the other port. Set the appropriate LAA in RAR[0] 4383 * reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4888,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work)
4888 struct e1000_adapter *adapter; 4915 struct e1000_adapter *adapter;
4889 adapter = container_of(work, struct e1000_adapter, reset_task); 4916 adapter = container_of(work, struct e1000_adapter, reset_task);
4890 4917
4918 /* don't run the task if already down */
4919 if (test_bit(__E1000_DOWN, &adapter->state))
4920 return;
4921
4891 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && 4922 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4892 (adapter->flags & FLAG_RX_RESTART_NOW))) { 4923 (adapter->flags & FLAG_RX_RESTART_NOW))) {
4893 e1000e_dump(adapter); 4924 e1000e_dump(adapter);
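
Several e1000e work items and the watchdog/phy timers above gain an early test_bit(__E1000_DOWN, &adapter->state) check, so deferred work queued just before the interface went down no longer touches the hardware afterwards. A rough sketch of the pattern for a hypothetical driver (struct, bit and function names are illustrative):

    #include <linux/workqueue.h>
    #include <linux/bitops.h>

    #define DEMO_DOWN 0  /* illustrative state bit */

    struct demo_adapter {
        unsigned long state;
        struct work_struct reset_task;
    };

    static void demo_reset_task(struct work_struct *work)
    {
        struct demo_adapter *adapter =
            container_of(work, struct demo_adapter, reset_task);

        /* Bail out early if the interface is already down; the work
         * may have been scheduled just before the teardown path ran. */
        if (test_bit(DEMO_DOWN, &adapter->state))
            return;

        /* ... safe to touch the hardware here ... */
    }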
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 112c5aa9af7f..907b05a1c659 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -812,7 +812,7 @@ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
812 if (netif_msg_hw(priv)) 812 if (netif_msg_hw(priv))
813 printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n", 813 printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
814 endptr + 1); 814 endptr + 1);
815 enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv); 815 enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
816} 816}
817 817
818static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, 818static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
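
The enc28j60 fix above swaps sizeof(tsv) for TSV_SIZE: tsv is declared as u8 tsv[TSV_SIZE] in the parameter list, and an array parameter decays to a pointer, so sizeof(tsv) inside the function is only the size of a pointer, not the buffer length. A standalone illustration (TSV_SIZE value chosen arbitrarily here):

    #include <stdio.h>

    #define TSV_SIZE 100

    /* An array in a parameter list is really a pointer. */
    static void show(unsigned char tsv[TSV_SIZE])
    {
        /* Prints sizeof(unsigned char *), typically 8 on 64-bit. */
        printf("inside callee: sizeof(tsv) = %zu\n", sizeof(tsv));
    }

    int main(void)
    {
        unsigned char tsv[TSV_SIZE];

        /* Prints 100 here, where tsv is a genuine array. */
        printf("in caller: sizeof(tsv) = %zu\n", sizeof(tsv));
        show(tsv);
        return 0;
    }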
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index af09296ef0dd..9c0b1bac6af6 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5645 goto out_error; 5645 goto out_error;
5646 } 5646 }
5647 5647
5648 netif_carrier_off(dev);
5649
5648 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", 5650 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
5649 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); 5651 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
5650 5652
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index d5ede2df3e42..ebbda7d15254 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1370,6 +1370,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1370 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); 1370 hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1371 1371
1372 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1372 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1373
1374 /* clear VMDq pool/queue selection for RAR 0 */
1375 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1373 } 1376 }
1374 hw->addr_ctrl.overflow_promisc = 0; 1377 hw->addr_ctrl.overflow_promisc = 0;
1375 1378
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6342d4859790..c54a88274d51 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -159,13 +159,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
159 struct scatterlist *sg; 159 struct scatterlist *sg;
160 unsigned int i, j, dmacount; 160 unsigned int i, j, dmacount;
161 unsigned int len; 161 unsigned int len;
162 static const unsigned int bufflen = 4096; 162 static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
163 unsigned int firstoff = 0; 163 unsigned int firstoff = 0;
164 unsigned int lastsize; 164 unsigned int lastsize;
165 unsigned int thisoff = 0; 165 unsigned int thisoff = 0;
166 unsigned int thislen = 0; 166 unsigned int thislen = 0;
167 u32 fcbuff, fcdmarw, fcfltrw; 167 u32 fcbuff, fcdmarw, fcfltrw;
168 dma_addr_t addr; 168 dma_addr_t addr = 0;
169 169
170 if (!netdev || !sgl) 170 if (!netdev || !sgl)
171 return 0; 171 return 0;
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
254 /* only the last buffer may have non-full bufflen */ 254 /* only the last buffer may have non-full bufflen */
255 lastsize = thisoff + thislen; 255 lastsize = thisoff + thislen;
256 256
257 /*
258 * lastsize can not be buffer len.
259 * If it is then adding another buffer with lastsize = 1.
260 */
261 if (lastsize == bufflen) {
262 if (j >= IXGBE_BUFFCNT_MAX) {
263 e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
264 "not enough user buffers. We need an extra "
265 "buffer because lastsize is bufflen.\n",
266 xid, i, j, dmacount, (u64)addr);
267 goto out_noddp_free;
268 }
269
270 ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
271 j++;
272 lastsize = 1;
273 }
274
257 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); 275 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
258 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); 276 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
259 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); 277 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
532 e_err(drv, "failed to allocated FCoE DDP pool\n"); 550 e_err(drv, "failed to allocated FCoE DDP pool\n");
533 551
534 spin_lock_init(&fcoe->lock); 552 spin_lock_init(&fcoe->lock);
553
554 /* Extra buffer to be shared by all DDPs for HW work around */
555 fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
556 if (fcoe->extra_ddp_buffer == NULL) {
557 e_err(drv, "failed to allocated extra DDP buffer\n");
558 goto out_extra_ddp_buffer_alloc;
559 }
560
561 fcoe->extra_ddp_buffer_dma =
562 dma_map_single(&adapter->pdev->dev,
563 fcoe->extra_ddp_buffer,
564 IXGBE_FCBUFF_MIN,
565 DMA_FROM_DEVICE);
566 if (dma_mapping_error(&adapter->pdev->dev,
567 fcoe->extra_ddp_buffer_dma)) {
568 e_err(drv, "failed to map extra DDP buffer\n");
569 goto out_extra_ddp_buffer_dma;
570 }
535 } 571 }
536 572
537 /* Enable L2 eth type filter for FCoE */ 573 /* Enable L2 eth type filter for FCoE */
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
581 } 617 }
582 } 618 }
583#endif 619#endif
620
621 return;
622
623out_extra_ddp_buffer_dma:
624 kfree(fcoe->extra_ddp_buffer);
625out_extra_ddp_buffer_alloc:
626 pci_pool_destroy(fcoe->pool);
627 fcoe->pool = NULL;
584} 628}
585 629
586/** 630/**
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
600 if (fcoe->pool) { 644 if (fcoe->pool) {
601 for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) 645 for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
602 ixgbe_fcoe_ddp_put(adapter->netdev, i); 646 ixgbe_fcoe_ddp_put(adapter->netdev, i);
647 dma_unmap_single(&adapter->pdev->dev,
648 fcoe->extra_ddp_buffer_dma,
649 IXGBE_FCBUFF_MIN,
650 DMA_FROM_DEVICE);
651 kfree(fcoe->extra_ddp_buffer);
603 pci_pool_destroy(fcoe->pool); 652 pci_pool_destroy(fcoe->pool);
604 fcoe->pool = NULL; 653 fcoe->pool = NULL;
605 } 654 }
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 4bc2c551c8db..65cc8fb14fe7 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -70,6 +70,8 @@ struct ixgbe_fcoe {
70 spinlock_t lock; 70 spinlock_t lock;
71 struct pci_pool *pool; 71 struct pci_pool *pool;
72 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; 72 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
73 unsigned char *extra_ddp_buffer;
74 dma_addr_t extra_ddp_buffer_dma;
73}; 75};
74 76
75#endif /* _IXGBE_FCOE_H */ 77#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 602078b84892..30f9ccfb4f87 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe";
52static const char ixgbe_driver_string[] = 52static const char ixgbe_driver_string[] =
53 "Intel(R) 10 Gigabit PCI Express Network Driver"; 53 "Intel(R) 10 Gigabit PCI Express Network Driver";
54 54
55#define DRV_VERSION "3.0.12-k2" 55#define DRV_VERSION "3.2.9-k2"
56const char ixgbe_driver_version[] = DRV_VERSION; 56const char ixgbe_driver_version[] = DRV_VERSION;
57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
58 58
@@ -3176,9 +3176,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3176 u32 mhadd, hlreg0; 3176 u32 mhadd, hlreg0;
3177 3177
3178 /* Decide whether to use packet split mode or not */ 3178 /* Decide whether to use packet split mode or not */
3179 /* On by default */
3180 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
3181
3179 /* Do not use packet split if we're in SR-IOV Mode */ 3182 /* Do not use packet split if we're in SR-IOV Mode */
3180 if (!adapter->num_vfs) 3183 if (adapter->num_vfs)
3181 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 3184 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3185
3186 /* Disable packet split due to 82599 erratum #45 */
3187 if (hw->mac.type == ixgbe_mac_82599EB)
3188 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3182 3189
3183 /* Set the RX buffer length according to the mode */ 3190 /* Set the RX buffer length according to the mode */
3184 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 3191 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -3721,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3721 * We need to try and force an autonegotiation 3728 * We need to try and force an autonegotiation
3722 * session, then bring up link. 3729 * session, then bring up link.
3723 */ 3730 */
3724 hw->mac.ops.setup_sfp(hw); 3731 if (hw->mac.ops.setup_sfp)
3732 hw->mac.ops.setup_sfp(hw);
3725 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) 3733 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
3726 schedule_work(&adapter->multispeed_fiber_task); 3734 schedule_work(&adapter->multispeed_fiber_task);
3727 } else { 3735 } else {
@@ -4863,16 +4871,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4863{ 4871{
4864 int q_idx, num_q_vectors; 4872 int q_idx, num_q_vectors;
4865 struct ixgbe_q_vector *q_vector; 4873 struct ixgbe_q_vector *q_vector;
4866 int napi_vectors;
4867 int (*poll)(struct napi_struct *, int); 4874 int (*poll)(struct napi_struct *, int);
4868 4875
4869 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 4876 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4870 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 4877 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4871 napi_vectors = adapter->num_rx_queues;
4872 poll = &ixgbe_clean_rxtx_many; 4878 poll = &ixgbe_clean_rxtx_many;
4873 } else { 4879 } else {
4874 num_q_vectors = 1; 4880 num_q_vectors = 1;
4875 napi_vectors = 1;
4876 poll = &ixgbe_poll; 4881 poll = &ixgbe_poll;
4877 } 4882 }
4878 4883
@@ -5964,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
5964 unregister_netdev(adapter->netdev); 5969 unregister_netdev(adapter->netdev);
5965 return; 5970 return;
5966 } 5971 }
5967 hw->mac.ops.setup_sfp(hw); 5972 if (hw->mac.ops.setup_sfp)
5973 hw->mac.ops.setup_sfp(hw);
5968 5974
5969 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) 5975 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
5970 /* This will also work for DA Twinax connections */ 5976 /* This will also work for DA Twinax connections */
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 47b15738b009..187b3a16ec1f 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -110,12 +110,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
111} 111}
112 112
113
114static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) 113static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
115{ 114{
116 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 115 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
117 vmolr |= (IXGBE_VMOLR_ROMPE | 116 vmolr |= (IXGBE_VMOLR_ROMPE |
118 IXGBE_VMOLR_ROPE |
119 IXGBE_VMOLR_BAM); 117 IXGBE_VMOLR_BAM);
120 if (aupe) 118 if (aupe)
121 vmolr |= IXGBE_VMOLR_AUPE; 119 vmolr |= IXGBE_VMOLR_AUPE;
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index 3a8923993ce3..f2518b01067d 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -133,17 +133,17 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
133 } 133 }
134 134
135 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 135 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
136 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); 136 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
137 IXGBE_WRITE_FLUSH(hw); 137 IXGBE_WRITE_FLUSH(hw);
138 138
139 /* Poll for reset bit to self-clear indicating reset is complete */ 139 /* Poll for reset bit to self-clear indicating reset is complete */
140 for (i = 0; i < 10; i++) { 140 for (i = 0; i < 10; i++) {
141 udelay(1); 141 udelay(1);
142 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 142 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
143 if (!(ctrl & IXGBE_CTRL_RST)) 143 if (!(ctrl & reset_bit))
144 break; 144 break;
145 } 145 }
146 if (ctrl & IXGBE_CTRL_RST) { 146 if (ctrl & reset_bit) {
147 status = IXGBE_ERR_RESET_FAILED; 147 status = IXGBE_ERR_RESET_FAILED;
148 hw_dbg(hw, "Reset polling failed to complete.\n"); 148 hw_dbg(hw, "Reset polling failed to complete.\n");
149 } 149 }
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 4ffdc18fcb8a..2765a3ce9c24 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1286,6 +1286,21 @@ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
1286 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 1286 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
1287 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 1287 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
1288 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ 1288 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
1289 { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
1290 { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
1291 { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
1292 { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
1293 { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
1294 { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
1295 { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
1296 { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
1297 { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
1298 { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
1299 { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
1300 { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
1301 { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
1302 { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
1303 { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
1289 { 0, } 1304 { 0, }
1290}; 1305};
1291 1306
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2541321bad82..9fb59d3f9c92 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -4489,6 +4489,9 @@ static int niu_alloc_channels(struct niu *np)
4489{ 4489{
4490 struct niu_parent *parent = np->parent; 4490 struct niu_parent *parent = np->parent;
4491 int first_rx_channel, first_tx_channel; 4491 int first_rx_channel, first_tx_channel;
4492 int num_rx_rings, num_tx_rings;
4493 struct rx_ring_info *rx_rings;
4494 struct tx_ring_info *tx_rings;
4492 int i, port, err; 4495 int i, port, err;
4493 4496
4494 port = np->port; 4497 port = np->port;
@@ -4498,18 +4501,21 @@ static int niu_alloc_channels(struct niu *np)
4498 first_tx_channel += parent->txchan_per_port[i]; 4501 first_tx_channel += parent->txchan_per_port[i];
4499 } 4502 }
4500 4503
4501 np->num_rx_rings = parent->rxchan_per_port[port]; 4504 num_rx_rings = parent->rxchan_per_port[port];
4502 np->num_tx_rings = parent->txchan_per_port[port]; 4505 num_tx_rings = parent->txchan_per_port[port];
4503 4506
4504 netif_set_real_num_rx_queues(np->dev, np->num_rx_rings); 4507 rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
4505 netif_set_real_num_tx_queues(np->dev, np->num_tx_rings); 4508 GFP_KERNEL);
4506
4507 np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
4508 GFP_KERNEL);
4509 err = -ENOMEM; 4509 err = -ENOMEM;
4510 if (!np->rx_rings) 4510 if (!rx_rings)
4511 goto out_err; 4511 goto out_err;
4512 4512
4513 np->num_rx_rings = num_rx_rings;
4514 smp_wmb();
4515 np->rx_rings = rx_rings;
4516
4517 netif_set_real_num_rx_queues(np->dev, num_rx_rings);
4518
4513 for (i = 0; i < np->num_rx_rings; i++) { 4519 for (i = 0; i < np->num_rx_rings; i++) {
4514 struct rx_ring_info *rp = &np->rx_rings[i]; 4520 struct rx_ring_info *rp = &np->rx_rings[i];
4515 4521
@@ -4538,12 +4544,18 @@ static int niu_alloc_channels(struct niu *np)
4538 return err; 4544 return err;
4539 } 4545 }
4540 4546
4541 np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info), 4547 tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
4542 GFP_KERNEL); 4548 GFP_KERNEL);
4543 err = -ENOMEM; 4549 err = -ENOMEM;
4544 if (!np->tx_rings) 4550 if (!tx_rings)
4545 goto out_err; 4551 goto out_err;
4546 4552
4553 np->num_tx_rings = num_tx_rings;
4554 smp_wmb();
4555 np->tx_rings = tx_rings;
4556
4557 netif_set_real_num_tx_queues(np->dev, num_tx_rings);
4558
4547 for (i = 0; i < np->num_tx_rings; i++) { 4559 for (i = 0; i < np->num_tx_rings; i++) {
4548 struct tx_ring_info *rp = &np->tx_rings[i]; 4560 struct tx_ring_info *rp = &np->tx_rings[i];
4549 4561
@@ -6246,11 +6258,17 @@ static void niu_sync_mac_stats(struct niu *np)
6246static void niu_get_rx_stats(struct niu *np) 6258static void niu_get_rx_stats(struct niu *np)
6247{ 6259{
6248 unsigned long pkts, dropped, errors, bytes; 6260 unsigned long pkts, dropped, errors, bytes;
6261 struct rx_ring_info *rx_rings;
6249 int i; 6262 int i;
6250 6263
6251 pkts = dropped = errors = bytes = 0; 6264 pkts = dropped = errors = bytes = 0;
6265
6266 rx_rings = ACCESS_ONCE(np->rx_rings);
6267 if (!rx_rings)
6268 goto no_rings;
6269
6252 for (i = 0; i < np->num_rx_rings; i++) { 6270 for (i = 0; i < np->num_rx_rings; i++) {
6253 struct rx_ring_info *rp = &np->rx_rings[i]; 6271 struct rx_ring_info *rp = &rx_rings[i];
6254 6272
6255 niu_sync_rx_discard_stats(np, rp, 0); 6273 niu_sync_rx_discard_stats(np, rp, 0);
6256 6274
@@ -6259,6 +6277,8 @@ static void niu_get_rx_stats(struct niu *np)
6259 dropped += rp->rx_dropped; 6277 dropped += rp->rx_dropped;
6260 errors += rp->rx_errors; 6278 errors += rp->rx_errors;
6261 } 6279 }
6280
6281no_rings:
6262 np->dev->stats.rx_packets = pkts; 6282 np->dev->stats.rx_packets = pkts;
6263 np->dev->stats.rx_bytes = bytes; 6283 np->dev->stats.rx_bytes = bytes;
6264 np->dev->stats.rx_dropped = dropped; 6284 np->dev->stats.rx_dropped = dropped;
@@ -6268,16 +6288,24 @@ static void niu_get_rx_stats(struct niu *np)
6268static void niu_get_tx_stats(struct niu *np) 6288static void niu_get_tx_stats(struct niu *np)
6269{ 6289{
6270 unsigned long pkts, errors, bytes; 6290 unsigned long pkts, errors, bytes;
6291 struct tx_ring_info *tx_rings;
6271 int i; 6292 int i;
6272 6293
6273 pkts = errors = bytes = 0; 6294 pkts = errors = bytes = 0;
6295
6296 tx_rings = ACCESS_ONCE(np->tx_rings);
6297 if (!tx_rings)
6298 goto no_rings;
6299
6274 for (i = 0; i < np->num_tx_rings; i++) { 6300 for (i = 0; i < np->num_tx_rings; i++) {
6275 struct tx_ring_info *rp = &np->tx_rings[i]; 6301 struct tx_ring_info *rp = &tx_rings[i];
6276 6302
6277 pkts += rp->tx_packets; 6303 pkts += rp->tx_packets;
6278 bytes += rp->tx_bytes; 6304 bytes += rp->tx_bytes;
6279 errors += rp->tx_errors; 6305 errors += rp->tx_errors;
6280 } 6306 }
6307
6308no_rings:
6281 np->dev->stats.tx_packets = pkts; 6309 np->dev->stats.tx_packets = pkts;
6282 np->dev->stats.tx_bytes = bytes; 6310 np->dev->stats.tx_bytes = bytes;
6283 np->dev->stats.tx_errors = errors; 6311 np->dev->stats.tx_errors = errors;
@@ -6287,9 +6315,10 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev)
6287{ 6315{
6288 struct niu *np = netdev_priv(dev); 6316 struct niu *np = netdev_priv(dev);
6289 6317
6290 niu_get_rx_stats(np); 6318 if (netif_running(dev)) {
6291 niu_get_tx_stats(np); 6319 niu_get_rx_stats(np);
6292 6320 niu_get_tx_stats(np);
6321 }
6293 return &dev->stats; 6322 return &dev->stats;
6294} 6323}
6295 6324
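
The niu changes above write the ring count, issue smp_wmb(), and only then publish the ring pointer; the lockless stats readers snapshot the pointer with ACCESS_ONCE() and skip the loop while it is still NULL, so niu_get_stats() can run while the rings are being (re)allocated. A compressed sketch of that publish/read pairing (structure and field names are made up):

    #include <linux/slab.h>
    #include <linux/smp.h>
    #include <linux/compiler.h>

    struct demo_ring { unsigned long packets; };

    struct demo_priv {
        int num_rings;
        struct demo_ring *rings;  /* NULL until published */
    };

    /* Writer: fill in the count, order it before the pointer, publish. */
    static int demo_alloc_rings(struct demo_priv *p, int n)
    {
        struct demo_ring *rings = kcalloc(n, sizeof(*rings), GFP_KERNEL);

        if (!rings)
            return -ENOMEM;
        p->num_rings = n;
        smp_wmb();              /* count visible before the pointer */
        p->rings = rings;
        return 0;
    }

    /* Lockless reader: snapshot the pointer once; skip if unpublished. */
    static unsigned long demo_sum_packets(struct demo_priv *p)
    {
        struct demo_ring *rings = ACCESS_ONCE(p->rings);
        unsigned long sum = 0;
        int i;

        if (!rings)
            return 0;
        for (i = 0; i < p->num_rings; i++)
            sum += rings[i].packets;
        return sum;
    }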
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index a0c26a99520f..e1e33c80fb25 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -73,7 +73,7 @@ struct pch_gbe_regs {
73 struct pch_gbe_regs_mac_adr mac_adr[16]; 73 struct pch_gbe_regs_mac_adr mac_adr[16];
74 u32 ADDR_MASK; 74 u32 ADDR_MASK;
75 u32 MIIM; 75 u32 MIIM;
76 u32 reserve2; 76 u32 MAC_ADDR_LOAD;
77 u32 RGMII_ST; 77 u32 RGMII_ST;
78 u32 RGMII_CTRL; 78 u32 RGMII_CTRL;
79 u32 reserve3[3]; 79 u32 reserve3[3];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 1bf12339441b..b99e90aca37d 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION;
29#define PCH_GBE_SHORT_PKT 64 29#define PCH_GBE_SHORT_PKT 64
30#define DSC_INIT16 0xC000 30#define DSC_INIT16 0xC000
31#define PCH_GBE_DMA_ALIGN 0 31#define PCH_GBE_DMA_ALIGN 0
32#define PCH_GBE_DMA_PADDING 2
32#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ 33#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
33#define PCH_GBE_COPYBREAK_DEFAULT 256 34#define PCH_GBE_COPYBREAK_DEFAULT 256
34#define PCH_GBE_PCI_BAR 1 35#define PCH_GBE_PCI_BAR 1
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
88static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); 89static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
89static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, 90static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
90 int data); 91 int data);
92
93inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
94{
95 iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
96}
97
91/** 98/**
92 * pch_gbe_mac_read_mac_addr - Read MAC address 99 * pch_gbe_mac_read_mac_addr - Read MAC address
93 * @hw: Pointer to the HW structure 100 * @hw: Pointer to the HW structure
@@ -519,7 +526,9 @@ static void pch_gbe_reset_task(struct work_struct *work)
519 struct pch_gbe_adapter *adapter; 526 struct pch_gbe_adapter *adapter;
520 adapter = container_of(work, struct pch_gbe_adapter, reset_task); 527 adapter = container_of(work, struct pch_gbe_adapter, reset_task);
521 528
529 rtnl_lock();
522 pch_gbe_reinit_locked(adapter); 530 pch_gbe_reinit_locked(adapter);
531 rtnl_unlock();
523} 532}
524 533
525/** 534/**
@@ -528,14 +537,8 @@ static void pch_gbe_reset_task(struct work_struct *work)
528 */ 537 */
529void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) 538void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
530{ 539{
531 struct net_device *netdev = adapter->netdev; 540 pch_gbe_down(adapter);
532 541 pch_gbe_up(adapter);
533 rtnl_lock();
534 if (netif_running(netdev)) {
535 pch_gbe_down(adapter);
536 pch_gbe_up(adapter);
537 }
538 rtnl_unlock();
539} 542}
540 543
541/** 544/**
@@ -1369,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1369 struct pch_gbe_buffer *buffer_info; 1372 struct pch_gbe_buffer *buffer_info;
1370 struct pch_gbe_rx_desc *rx_desc; 1373 struct pch_gbe_rx_desc *rx_desc;
1371 u32 length; 1374 u32 length;
1372 unsigned char tmp_packet[ETH_HLEN];
1373 unsigned int i; 1375 unsigned int i;
1374 unsigned int cleaned_count = 0; 1376 unsigned int cleaned_count = 0;
1375 bool cleaned = false; 1377 bool cleaned = false;
1376 struct sk_buff *skb; 1378 struct sk_buff *skb, *new_skb;
1377 u8 dma_status; 1379 u8 dma_status;
1378 u16 gbec_status; 1380 u16 gbec_status;
1379 u32 tcp_ip_status; 1381 u32 tcp_ip_status;
1380 u8 skb_copy_flag = 0;
1381 u8 skb_padding_flag = 0;
1382 1382
1383 i = rx_ring->next_to_clean; 1383 i = rx_ring->next_to_clean;
1384 1384
@@ -1422,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1422 pr_err("Receive CRC Error\n"); 1422 pr_err("Receive CRC Error\n");
1423 } else { 1423 } else {
1424 /* get receive length */ 1424 /* get receive length */
1425 /* length convert[-3], padding[-2] */ 1425 /* length convert[-3] */
1426 length = (rx_desc->rx_words_eob) - 3 - 2; 1426 length = (rx_desc->rx_words_eob) - 3;
1427 1427
1428 /* Decide the data conversion method */ 1428 /* Decide the data conversion method */
1429 if (!adapter->rx_csum) { 1429 if (!adapter->rx_csum) {
1430 /* [Header:14][payload] */ 1430 /* [Header:14][payload] */
1431 skb_padding_flag = 0; 1431 if (NET_IP_ALIGN) {
1432 skb_copy_flag = 1; 1432 /* Because alignment differs,
1433 * the new_skb is newly allocated,
1434 * and data is copied to new_skb.*/
1435 new_skb = netdev_alloc_skb(netdev,
1436 length + NET_IP_ALIGN);
1437 if (!new_skb) {
1438 /* dorrop error */
1439 pr_err("New skb allocation "
1440 "Error\n");
1441 goto dorrop;
1442 }
1443 skb_reserve(new_skb, NET_IP_ALIGN);
1444 memcpy(new_skb->data, skb->data,
1445 length);
1446 skb = new_skb;
1447 } else {
1448 /* DMA buffer is used as SKB as it is.*/
1449 buffer_info->skb = NULL;
1450 }
1433 } else { 1451 } else {
1434 /* [Header:14][padding:2][payload] */ 1452 /* [Header:14][padding:2][payload] */
1435 skb_padding_flag = 1; 1453 /* The length includes padding length */
1436 if (length < copybreak) 1454 length = length - PCH_GBE_DMA_PADDING;
1437 skb_copy_flag = 1; 1455 if ((length < copybreak) ||
1438 else 1456 (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
1439 skb_copy_flag = 0; 1457 /* Because alignment differs,
1440 } 1458 * the new_skb is newly allocated,
1441 1459 * and data is copied to new_skb.
1442 /* Data conversion */ 1460 * Padding data is deleted
1443 if (skb_copy_flag) { /* recycle skb */ 1461 * at the time of a copy.*/
1444 struct sk_buff *new_skb; 1462 new_skb = netdev_alloc_skb(netdev,
1445 new_skb = 1463 length + NET_IP_ALIGN);
1446 netdev_alloc_skb(netdev, 1464 if (!new_skb) {
1447 length + NET_IP_ALIGN); 1465 /* dorrop error */
1448 if (new_skb) { 1466 pr_err("New skb allocation "
1449 if (!skb_padding_flag) { 1467 "Error\n");
1450 skb_reserve(new_skb, 1468 goto dorrop;
1451 NET_IP_ALIGN);
1452 } 1469 }
1470 skb_reserve(new_skb, NET_IP_ALIGN);
1453 memcpy(new_skb->data, skb->data, 1471 memcpy(new_skb->data, skb->data,
1454 length); 1472 ETH_HLEN);
1455 /* save the skb 1473 memcpy(&new_skb->data[ETH_HLEN],
1456 * in buffer_info as good */ 1474 &skb->data[ETH_HLEN +
1475 PCH_GBE_DMA_PADDING],
1476 length - ETH_HLEN);
1457 skb = new_skb; 1477 skb = new_skb;
1458 } else if (!skb_padding_flag) { 1478 } else {
1459 /* dorrop error */ 1479 /* Padding data is deleted
1460 pr_err("New skb allocation Error\n"); 1480 * by moving header data.*/
1461 goto dorrop; 1481 memmove(&skb->data[PCH_GBE_DMA_PADDING],
1482 &skb->data[0], ETH_HLEN);
1483 skb_reserve(skb, NET_IP_ALIGN);
1484 buffer_info->skb = NULL;
1462 } 1485 }
1463 } else {
1464 buffer_info->skb = NULL;
1465 } 1486 }
1466 if (skb_padding_flag) { 1487 /* The length includes FCS length */
1467 memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN); 1488 length = length - ETH_FCS_LEN;
1468 memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
1469 ETH_HLEN);
1470 skb_reserve(skb, NET_IP_ALIGN);
1471
1472 }
1473
1474 /* update status of driver */ 1489 /* update status of driver */
1475 adapter->stats.rx_bytes += length; 1490 adapter->stats.rx_bytes += length;
1476 adapter->stats.rx_packets++; 1491 adapter->stats.rx_packets++;
@@ -2322,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2322 netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; 2337 netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
2323 pch_gbe_set_ethtool_ops(netdev); 2338 pch_gbe_set_ethtool_ops(netdev);
2324 2339
2340 pch_gbe_mac_load_mac_addr(&adapter->hw);
2325 pch_gbe_mac_reset_hw(&adapter->hw); 2341 pch_gbe_mac_reset_hw(&adapter->hw);
2326 2342
2327 /* setup the private structure */ 2343 /* setup the private structure */
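
The reworked pch_gbe receive path above copies frames into a freshly allocated skb reserved by NET_IP_ALIGN when the hardware's 2-byte padding does not match the stack's expected alignment, splicing the Ethernet header and payload back together without the pad. A simplified sketch of that copy; the buffer layout follows the driver's comments, while the helper name and the use of skb_put() are my own:

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/if_ether.h>
    #include <linux/string.h>

    #define DEMO_DMA_PADDING 2  /* pad inserted after the Ethernet header */

    /*
     * Source layout in the DMA buffer: [header:14][padding:2][payload]
     * Destination skb:                 [header:14][payload], IP-aligned.
     * 'len' is the frame length with the padding already subtracted.
     */
    static struct sk_buff *demo_copy_rx(struct net_device *netdev,
                                        const u8 *buf, unsigned int len)
    {
        struct sk_buff *skb = netdev_alloc_skb(netdev, len + NET_IP_ALIGN);

        if (!skb)
            return NULL;
        skb_reserve(skb, NET_IP_ALIGN);
        memcpy(skb_put(skb, ETH_HLEN), buf, ETH_HLEN);
        memcpy(skb_put(skb, len - ETH_HLEN),
               buf + ETH_HLEN + DEMO_DMA_PADDING, len - ETH_HLEN);
        return skb;
    }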
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 1f42f6ac8551..d3cb77205863 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1488,12 +1488,10 @@ static void ei_rx_overrun(struct net_device *dev)
1488 1488
1489 /* 1489 /*
1490 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. 1490 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
1491 * Early datasheets said to poll the reset bit, but now they say that 1491 * We wait at least 2ms.
1492 * it "is not a reliable indicator and subsequently should be ignored."
1493 * We wait at least 10ms.
1494 */ 1492 */
1495 1493
1496 mdelay(10); 1494 mdelay(2);
1497 1495
1498 /* 1496 /*
1499 * Reset RBCR[01] back to zero as per magic incantation. 1497 * Reset RBCR[01] back to zero as per magic incantation.
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index bde7d61f1930..469ab0b7ce31 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -973,7 +973,8 @@ static void __rtl8169_check_link_status(struct net_device *dev,
973 if (pm) 973 if (pm)
974 pm_request_resume(&tp->pci_dev->dev); 974 pm_request_resume(&tp->pci_dev->dev);
975 netif_carrier_on(dev); 975 netif_carrier_on(dev);
976 netif_info(tp, ifup, dev, "link up\n"); 976 if (net_ratelimit())
977 netif_info(tp, ifup, dev, "link up\n");
977 } else { 978 } else {
978 netif_carrier_off(dev); 979 netif_carrier_off(dev);
979 netif_info(tp, ifdown, dev, "link down\n"); 980 netif_info(tp, ifdown, dev, "link down\n");
@@ -3189,6 +3190,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3189 if (pci_dev_run_wake(pdev)) 3190 if (pci_dev_run_wake(pdev))
3190 pm_runtime_put_noidle(&pdev->dev); 3191 pm_runtime_put_noidle(&pdev->dev);
3191 3192
3193 netif_carrier_off(dev);
3194
3192out: 3195out:
3193 return rc; 3196 return rc;
3194 3197
@@ -3757,7 +3760,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
3757 RTL_W16(IntrMitigate, 0x5151); 3760 RTL_W16(IntrMitigate, 0x5151);
3758 3761
3759 /* Work around for RxFIFO overflow. */ 3762 /* Work around for RxFIFO overflow. */
3760 if (tp->mac_version == RTL_GIGA_MAC_VER_11) { 3763 if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
3764 tp->mac_version == RTL_GIGA_MAC_VER_22) {
3761 tp->intr_event |= RxFIFOOver | PCSTimeout; 3765 tp->intr_event |= RxFIFOOver | PCSTimeout;
3762 tp->intr_event &= ~RxOverflow; 3766 tp->intr_event &= ~RxOverflow;
3763 } 3767 }
@@ -4639,12 +4643,33 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4639 break; 4643 break;
4640 } 4644 }
4641 4645
4642 /* Work around for rx fifo overflow */ 4646 if (unlikely(status & RxFIFOOver)) {
4643 if (unlikely(status & RxFIFOOver) && 4647 switch (tp->mac_version) {
4644 (tp->mac_version == RTL_GIGA_MAC_VER_11)) { 4648 /* Work around for rx fifo overflow */
4645 netif_stop_queue(dev); 4649 case RTL_GIGA_MAC_VER_11:
4646 rtl8169_tx_timeout(dev); 4650 case RTL_GIGA_MAC_VER_22:
4647 break; 4651 case RTL_GIGA_MAC_VER_26:
4652 netif_stop_queue(dev);
4653 rtl8169_tx_timeout(dev);
4654 goto done;
4655 /* Testers needed. */
4656 case RTL_GIGA_MAC_VER_17:
4657 case RTL_GIGA_MAC_VER_19:
4658 case RTL_GIGA_MAC_VER_20:
4659 case RTL_GIGA_MAC_VER_21:
4660 case RTL_GIGA_MAC_VER_23:
4661 case RTL_GIGA_MAC_VER_24:
4662 case RTL_GIGA_MAC_VER_27:
4663 case RTL_GIGA_MAC_VER_28:
4664 /* Experimental science. Pktgen proof. */
4665 case RTL_GIGA_MAC_VER_12:
4666 case RTL_GIGA_MAC_VER_25:
4667 if (status == RxFIFOOver)
4668 goto done;
4669 break;
4670 default:
4671 break;
4672 }
4648 } 4673 }
4649 4674
4650 if (unlikely(status & SYSErr)) { 4675 if (unlikely(status & SYSErr)) {
@@ -4680,7 +4705,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4680 (status & RxFIFOOver) ? (status | RxOverflow) : status); 4705 (status & RxFIFOOver) ? (status | RxOverflow) : status);
4681 status = RTL_R16(IntrStatus); 4706 status = RTL_R16(IntrStatus);
4682 } 4707 }
4683 4708done:
4684 return IRQ_RETVAL(handled); 4709 return IRQ_RETVAL(handled);
4685} 4710}
4686 4711
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 5976d1d51df1..640e368ebeee 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev)
1777 "cur_rx:%4.4d, dirty_rx:%4.4d\n", 1777 "cur_rx:%4.4d, dirty_rx:%4.4d\n",
1778 net_dev->name, sis_priv->cur_rx, 1778 net_dev->name, sis_priv->cur_rx,
1779 sis_priv->dirty_rx); 1779 sis_priv->dirty_rx);
1780 dev_kfree_skb(skb);
1780 break; 1781 break;
1781 } 1782 }
1782 1783
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 34a0af3837f9..0e5f03135b50 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1560 1560
1561 priv->hw = device; 1561 priv->hw = device;
1562 1562
1563 if (device_can_wakeup(priv->device)) 1563 if (device_can_wakeup(priv->device)) {
1564 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ 1564 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
1565 enable_irq_wake(dev->irq);
1566 }
1565 1567
1566 return 0; 1568 return 0;
1567} 1569}
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 93b32d366611..06c0e5033656 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11158 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 11158 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11159 break; /* We have no PHY */ 11159 break; /* We have no PHY */
11160 11160
11161 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11161 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11162 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11163 !netif_running(dev)))
11162 return -EAGAIN; 11164 return -EAGAIN;
11163 11165
11164 spin_lock_bh(&tp->lock); 11166 spin_lock_bh(&tp->lock);
@@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11174 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 11176 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11175 break; /* We have no PHY */ 11177 break; /* We have no PHY */
11176 11178
11177 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11179 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11180 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11181 !netif_running(dev)))
11178 return -EAGAIN; 11182 return -EAGAIN;
11179 11183
11180 spin_lock_bh(&tp->lock); 11184 spin_lock_bh(&tp->lock);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 04e8ce14a1d0..7113168473cf 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * cdc_ncm.c 2 * cdc_ncm.c
3 * 3 *
4 * Copyright (C) ST-Ericsson 2010 4 * Copyright (C) ST-Ericsson 2010-2011
5 * Contact: Alexey Orishko <alexey.orishko@stericsson.com> 5 * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> 6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
7 * 7 *
@@ -54,7 +54,7 @@
54#include <linux/usb/usbnet.h> 54#include <linux/usb/usbnet.h>
55#include <linux/usb/cdc.h> 55#include <linux/usb/cdc.h>
56 56
57#define DRIVER_VERSION "17-Jan-2011" 57#define DRIVER_VERSION "7-Feb-2011"
58 58
59/* CDC NCM subclass 3.2.1 */ 59/* CDC NCM subclass 3.2.1 */
60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -77,6 +77,9 @@
77 */ 77 */
78#define CDC_NCM_DPT_DATAGRAMS_MAX 32 78#define CDC_NCM_DPT_DATAGRAMS_MAX 32
79 79
80/* Maximum amount of IN datagrams in NTB */
81#define CDC_NCM_DPT_DATAGRAMS_IN_MAX 0 /* unlimited */
82
80/* Restart the timer, if amount of datagrams is less than given value */ 83/* Restart the timer, if amount of datagrams is less than given value */
81#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 84#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
82 85
@@ -85,11 +88,6 @@
85 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ 88 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
86 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) 89 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
87 90
88struct connection_speed_change {
89 __le32 USBitRate; /* holds 3GPP downlink value, bits per second */
90 __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */
91} __attribute__ ((packed));
92
93struct cdc_ncm_data { 91struct cdc_ncm_data {
94 struct usb_cdc_ncm_nth16 nth16; 92 struct usb_cdc_ncm_nth16 nth16;
95 struct usb_cdc_ncm_ndp16 ndp16; 93 struct usb_cdc_ncm_ndp16 ndp16;
@@ -198,10 +196,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
198{ 196{
199 struct usb_cdc_notification req; 197 struct usb_cdc_notification req;
200 u32 val; 198 u32 val;
201 __le16 max_datagram_size;
202 u8 flags; 199 u8 flags;
203 u8 iface_no; 200 u8 iface_no;
204 int err; 201 int err;
202 u16 ntb_fmt_supported;
205 203
206 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; 204 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
207 205
@@ -223,6 +221,9 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
223 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); 221 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
224 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); 222 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
225 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); 223 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
224 /* devices prior to NCM Errata shall set this field to zero */
225 ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
226 ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
226 227
227 if (ctx->func_desc != NULL) 228 if (ctx->func_desc != NULL)
228 flags = ctx->func_desc->bmNetworkCapabilities; 229 flags = ctx->func_desc->bmNetworkCapabilities;
@@ -231,22 +232,58 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
231 232
232 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " 233 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
233 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " 234 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
234 "wNdpOutAlignment=%u flags=0x%x\n", 235 "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
235 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, 236 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
236 ctx->tx_ndp_modulus, flags); 237 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
237 238
238 /* max count of tx datagrams without terminating NULL entry */ 239 /* max count of tx datagrams */
239 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; 240 if ((ctx->tx_max_datagrams == 0) ||
241 (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
242 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
240 243
241 /* verify maximum size of received NTB in bytes */ 244 /* verify maximum size of received NTB in bytes */
242 if ((ctx->rx_max < 245 if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
243 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || 246 pr_debug("Using min receive length=%d\n",
244 (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) { 247 USB_CDC_NCM_NTB_MIN_IN_SIZE);
248 ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
249 }
250
251 if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
245 pr_debug("Using default maximum receive length=%d\n", 252 pr_debug("Using default maximum receive length=%d\n",
246 CDC_NCM_NTB_MAX_SIZE_RX); 253 CDC_NCM_NTB_MAX_SIZE_RX);
247 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; 254 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
248 } 255 }
249 256
257 /* inform device about NTB input size changes */
258 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
259 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
260 USB_RECIP_INTERFACE;
261 req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
262 req.wValue = 0;
263 req.wIndex = cpu_to_le16(iface_no);
264
265 if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
266 struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
267
268 req.wLength = 8;
269 ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
270 ndp_in_sz.wNtbInMaxDatagrams =
271 cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
272 ndp_in_sz.wReserved = 0;
273 err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
274 1000);
275 } else {
276 __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
277
278 req.wLength = 4;
279 err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
280 NULL, 1000);
281 }
282
283 if (err)
284 pr_debug("Setting NTB Input Size failed\n");
285 }
286
250 /* verify maximum size of transmitted NTB in bytes */ 287 /* verify maximum size of transmitted NTB in bytes */
251 if ((ctx->tx_max < 288 if ((ctx->tx_max <
252 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || 289 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
@@ -297,47 +334,84 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
297 /* additional configuration */ 334 /* additional configuration */
298 335
299 /* set CRC Mode */ 336 /* set CRC Mode */
300 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; 337 if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
301 req.bNotificationType = USB_CDC_SET_CRC_MODE; 338 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
302 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); 339 USB_RECIP_INTERFACE;
303 req.wIndex = cpu_to_le16(iface_no); 340 req.bNotificationType = USB_CDC_SET_CRC_MODE;
304 req.wLength = 0; 341 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
305 342 req.wIndex = cpu_to_le16(iface_no);
306 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); 343 req.wLength = 0;
307 if (err) 344
308 pr_debug("Setting CRC mode off failed\n"); 345 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
346 if (err)
347 pr_debug("Setting CRC mode off failed\n");
348 }
309 349
310 /* set NTB format */ 350 /* set NTB format, if both formats are supported */
311 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; 351 if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
312 req.bNotificationType = USB_CDC_SET_NTB_FORMAT; 352 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
313 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); 353 USB_RECIP_INTERFACE;
314 req.wIndex = cpu_to_le16(iface_no); 354 req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
315 req.wLength = 0; 355 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
356 req.wIndex = cpu_to_le16(iface_no);
357 req.wLength = 0;
358
359 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
360 if (err)
361 pr_debug("Setting NTB format to 16-bit failed\n");
362 }
316 363
317 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); 364 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
318 if (err)
319 pr_debug("Setting NTB format to 16-bit failed\n");
320 365
321 /* set Max Datagram Size (MTU) */ 366 /* set Max Datagram Size (MTU) */
322 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE; 367 if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
323 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; 368 __le16 max_datagram_size;
324 req.wValue = 0; 369 u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
325 req.wIndex = cpu_to_le16(iface_no); 370
326 req.wLength = cpu_to_le16(2); 371 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
372 USB_RECIP_INTERFACE;
373 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
374 req.wValue = 0;
375 req.wIndex = cpu_to_le16(iface_no);
376 req.wLength = cpu_to_le16(2);
377
378 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
379 1000);
380 if (err) {
381 pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
382 CDC_NCM_MIN_DATAGRAM_SIZE);
383 } else {
384 ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
385 /* Check Eth descriptor value */
386 if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
387 if (ctx->max_datagram_size > eth_max_sz)
388 ctx->max_datagram_size = eth_max_sz;
389 } else {
390 if (ctx->max_datagram_size >
391 CDC_NCM_MAX_DATAGRAM_SIZE)
392 ctx->max_datagram_size =
393 CDC_NCM_MAX_DATAGRAM_SIZE;
394 }
327 395
328 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000); 396 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
329 if (err) { 397 ctx->max_datagram_size =
330 pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n", 398 CDC_NCM_MIN_DATAGRAM_SIZE;
331 CDC_NCM_MIN_DATAGRAM_SIZE); 399
332 /* use default */ 400 /* if value changed, update device */
333 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; 401 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
334 } else { 402 USB_RECIP_INTERFACE;
335 ctx->max_datagram_size = le16_to_cpu(max_datagram_size); 403 req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
404 req.wValue = 0;
405 req.wIndex = cpu_to_le16(iface_no);
406 req.wLength = 2;
407 max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
408
409 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
410 0, NULL, 1000);
411 if (err)
412 pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
413 }
336 414
337 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
338 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
339 else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
340 ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
341 } 415 }
342 416
343 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) 417 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
@@ -466,19 +540,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
466 540
467 ctx->ether_desc = 541 ctx->ether_desc =
468 (const struct usb_cdc_ether_desc *)buf; 542 (const struct usb_cdc_ether_desc *)buf;
469
470 dev->hard_mtu = 543 dev->hard_mtu =
471 le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); 544 le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
472 545
473 if (dev->hard_mtu < 546 if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
474 (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN)) 547 dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE;
475 dev->hard_mtu = 548 else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
476 CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN; 549 dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE;
477
478 else if (dev->hard_mtu >
479 (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
480 dev->hard_mtu =
481 CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
482 break; 550 break;
483 551
484 case USB_CDC_NCM_TYPE: 552 case USB_CDC_NCM_TYPE:
@@ -628,13 +696,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
628 u32 offset; 696 u32 offset;
629 u32 last_offset; 697 u32 last_offset;
630 u16 n = 0; 698 u16 n = 0;
631 u8 timeout = 0; 699 u8 ready2send = 0;
632 700
633 /* if there is a remaining skb, it gets priority */ 701 /* if there is a remaining skb, it gets priority */
634 if (skb != NULL) 702 if (skb != NULL)
635 swap(skb, ctx->tx_rem_skb); 703 swap(skb, ctx->tx_rem_skb);
636 else 704 else
637 timeout = 1; 705 ready2send = 1;
638 706
639 /* 707 /*
640 * +----------------+ 708 * +----------------+
@@ -682,9 +750,10 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
682 750
683 for (; n < ctx->tx_max_datagrams; n++) { 751 for (; n < ctx->tx_max_datagrams; n++) {
684 /* check if end of transmit buffer is reached */ 752 /* check if end of transmit buffer is reached */
685 if (offset >= ctx->tx_max) 753 if (offset >= ctx->tx_max) {
754 ready2send = 1;
686 break; 755 break;
687 756 }
688 /* compute maximum buffer size */ 757 /* compute maximum buffer size */
689 rem = ctx->tx_max - offset; 758 rem = ctx->tx_max - offset;
690 759
@@ -711,9 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
711 } 780 }
712 ctx->tx_rem_skb = skb; 781 ctx->tx_rem_skb = skb;
713 skb = NULL; 782 skb = NULL;
714 783 ready2send = 1;
715 /* loop one more time */
716 timeout = 1;
717 } 784 }
718 break; 785 break;
719 } 786 }
@@ -756,7 +823,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
756 ctx->tx_curr_last_offset = last_offset; 823 ctx->tx_curr_last_offset = last_offset;
757 goto exit_no_skb; 824 goto exit_no_skb;
758 825
759 } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) { 826 } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
760 /* wait for more frames */ 827 /* wait for more frames */
761 /* push variables */ 828 /* push variables */
762 ctx->tx_curr_skb = skb_out; 829 ctx->tx_curr_skb = skb_out;
@@ -813,7 +880,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
813 cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); 880 cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
814 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); 881 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
815 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); 882 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
816 ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), 883 ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
817 ctx->tx_ndp_modulus); 884 ctx->tx_ndp_modulus);
818 885
819 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); 886 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
@@ -825,13 +892,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
825 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) * 892 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
826 sizeof(struct usb_cdc_ncm_dpe16)); 893 sizeof(struct usb_cdc_ncm_dpe16));
827 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); 894 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
828 ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */ 895 ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
829 896
830 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex, 897 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
831 &(ctx->tx_ncm.ndp16), 898 &(ctx->tx_ncm.ndp16),
832 sizeof(ctx->tx_ncm.ndp16)); 899 sizeof(ctx->tx_ncm.ndp16));
833 900
834 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex + 901 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
835 sizeof(ctx->tx_ncm.ndp16), 902 sizeof(ctx->tx_ncm.ndp16),
836 &(ctx->tx_ncm.dpe16), 903 &(ctx->tx_ncm.dpe16),
837 (ctx->tx_curr_frame_num + 1) * 904 (ctx->tx_curr_frame_num + 1) *
@@ -961,7 +1028,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
961 goto error; 1028 goto error;
962 } 1029 }
963 1030
964 temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex); 1031 temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex);
965 if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) { 1032 if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
966 pr_debug("invalid DPT16 index\n"); 1033 pr_debug("invalid DPT16 index\n");
967 goto error; 1034 goto error;
@@ -1048,10 +1115,10 @@ error:
1048 1115
1049static void 1116static void
1050cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx, 1117cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
1051 struct connection_speed_change *data) 1118 struct usb_cdc_speed_change *data)
1052{ 1119{
1053 uint32_t rx_speed = le32_to_cpu(data->USBitRate); 1120 uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
1054 uint32_t tx_speed = le32_to_cpu(data->DSBitRate); 1121 uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
1055 1122
1056 /* 1123 /*
1057 * Currently the USB-NET API does not support reporting the actual 1124 * Currently the USB-NET API does not support reporting the actual
@@ -1092,7 +1159,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1092 /* test for split data in 8-byte chunks */ 1159 /* test for split data in 8-byte chunks */
1093 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { 1160 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
1094 cdc_ncm_speed_change(ctx, 1161 cdc_ncm_speed_change(ctx,
1095 (struct connection_speed_change *)urb->transfer_buffer); 1162 (struct usb_cdc_speed_change *)urb->transfer_buffer);
1096 return; 1163 return;
1097 } 1164 }
1098 1165
@@ -1120,12 +1187,12 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1120 break; 1187 break;
1121 1188
1122 case USB_CDC_NOTIFY_SPEED_CHANGE: 1189 case USB_CDC_NOTIFY_SPEED_CHANGE:
1123 if (urb->actual_length < 1190 if (urb->actual_length < (sizeof(*event) +
1124 (sizeof(*event) + sizeof(struct connection_speed_change))) 1191 sizeof(struct usb_cdc_speed_change)))
1125 set_bit(EVENT_STS_SPLIT, &dev->flags); 1192 set_bit(EVENT_STS_SPLIT, &dev->flags);
1126 else 1193 else
1127 cdc_ncm_speed_change(ctx, 1194 cdc_ncm_speed_change(ctx,
1128 (struct connection_speed_change *) &event[1]); 1195 (struct usb_cdc_speed_change *) &event[1]);
1129 break; 1196 break;
1130 1197
1131 default: 1198 default:
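
Editor's note: the cdc_ncm hunks above bound the device-reported maximum datagram size by the Ethernet functional descriptor's wMaxSegmentSize and by the driver's own limits before programming it back with SET_MAX_DATAGRAM_SIZE. A minimal userspace sketch of that clamping logic follows; the numeric constants are placeholders, not the kernel's real CDC_NCM_MIN/MAX_DATAGRAM_SIZE values.

    /* Sketch only: mirrors the clamping shape added to cdc_ncm_setup(). */
    #include <stdint.h>
    #include <stdio.h>

    #define NCM_MIN_DATAGRAM_SIZE 1514u   /* placeholder */
    #define NCM_MAX_DATAGRAM_SIZE 8192u   /* placeholder */

    /* Clamp the size reported by the device against the Ethernet
     * descriptor's wMaxSegmentSize and the driver's absolute limits. */
    static uint32_t clamp_datagram_size(uint32_t dev_reported, uint16_t eth_max_sz)
    {
        uint32_t size = dev_reported;

        if (eth_max_sz < NCM_MAX_DATAGRAM_SIZE) {
            if (size > eth_max_sz)
                size = eth_max_sz;
        } else if (size > NCM_MAX_DATAGRAM_SIZE) {
            size = NCM_MAX_DATAGRAM_SIZE;
        }

        if (size < NCM_MIN_DATAGRAM_SIZE)
            size = NCM_MIN_DATAGRAM_SIZE;

        return size;
    }

    int main(void)
    {
        printf("%u\n", clamp_datagram_size(9000, 2048)); /* -> 2048 */
        printf("%u\n", clamp_datagram_size(512, 9000));  /* -> 1514 */
        return 0;
    }
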
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index bed8fcedff49..6d83812603b6 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2628,15 +2628,15 @@ exit:
2628 2628
2629static void hso_free_tiomget(struct hso_serial *serial) 2629static void hso_free_tiomget(struct hso_serial *serial)
2630{ 2630{
2631 struct hso_tiocmget *tiocmget = serial->tiocmget; 2631 struct hso_tiocmget *tiocmget;
2632 if (!serial)
2633 return;
2634 tiocmget = serial->tiocmget;
2632 if (tiocmget) { 2635 if (tiocmget) {
2633 if (tiocmget->urb) { 2636 usb_free_urb(tiocmget->urb);
2634 usb_free_urb(tiocmget->urb); 2637 tiocmget->urb = NULL;
2635 tiocmget->urb = NULL;
2636 }
2637 serial->tiocmget = NULL; 2638 serial->tiocmget = NULL;
2638 kfree(tiocmget); 2639 kfree(tiocmget);
2639
2640 } 2640 }
2641} 2641}
2642 2642
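
Editor's note: the hso hunk above checks the serial pointer before dereferencing it and drops the inner urb test, relying on usb_free_urb() accepting NULL. A small sketch of the same shape using plain malloc/free; every name below is a stand-in, not the real USB API.

    #include <stdlib.h>

    struct tiocmget_like {
        void *urb;                      /* stands in for struct urb * */
    };

    struct serial_like {
        struct tiocmget_like *tiocmget;
    };

    static void free_tiomget_like(struct serial_like *serial)
    {
        struct tiocmget_like *t;

        if (!serial)                    /* bail out before any dereference */
            return;

        t = serial->tiocmget;
        if (t) {
            free(t->urb);               /* free(NULL) is a no-op, like usb_free_urb(NULL) */
            t->urb = NULL;
            serial->tiocmget = NULL;
            free(t);
        }
    }

    int main(void)
    {
        free_tiomget_like(NULL);        /* safe: NULL container */
        return 0;
    }
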
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ed9a41643ff4..95c41d56631c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -931,8 +931,10 @@ fail_halt:
931 if (urb != NULL) { 931 if (urb != NULL) {
932 clear_bit (EVENT_RX_MEMORY, &dev->flags); 932 clear_bit (EVENT_RX_MEMORY, &dev->flags);
933 status = usb_autopm_get_interface(dev->intf); 933 status = usb_autopm_get_interface(dev->intf);
934 if (status < 0) 934 if (status < 0) {
935 usb_free_urb(urb);
935 goto fail_lowmem; 936 goto fail_lowmem;
937 }
936 if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) 938 if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
937 resched = 0; 939 resched = 0;
938 usb_autopm_put_interface(dev->intf); 940 usb_autopm_put_interface(dev->intf);
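
Editor's note: the usbnet hunk above frees the freshly allocated URB when usb_autopm_get_interface() fails, closing a leak on that error path. A hedged sketch of the general pattern, with illustrative names only.

    #include <stdio.h>
    #include <stdlib.h>

    static int power_up_interface(int fail)
    {
        return fail ? -1 : 0;           /* stands in for usb_autopm_get_interface() */
    }

    static int queue_rx(int fail)
    {
        void *urb = malloc(64);         /* stands in for usb_alloc_urb() */
        if (!urb)
            return -1;

        if (power_up_interface(fail) < 0) {
            free(urb);                  /* the release the patch adds on failure */
            return -1;
        }

        /* ... submit urb ... */
        free(urb);                      /* normal-path cleanup for this sketch */
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", queue_rx(0), queue_rx(1));
        return 0;
    }
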
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 90a23e410d1b..82dba5aaf423 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq)
446 } 446 }
447} 447}
448 448
449static void virtnet_napi_enable(struct virtnet_info *vi)
450{
451 napi_enable(&vi->napi);
452
453 /* If all buffers were filled by other side before we napi_enabled, we
454 * won't get another interrupt, so process any outstanding packets
455 * now. virtnet_poll wants re-enable the queue, so we disable here.
456 * We synchronize against interrupts via NAPI_STATE_SCHED */
457 if (napi_schedule_prep(&vi->napi)) {
458 virtqueue_disable_cb(vi->rvq);
459 __napi_schedule(&vi->napi);
460 }
461}
462
449static void refill_work(struct work_struct *work) 463static void refill_work(struct work_struct *work)
450{ 464{
451 struct virtnet_info *vi; 465 struct virtnet_info *vi;
@@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work)
454 vi = container_of(work, struct virtnet_info, refill.work); 468 vi = container_of(work, struct virtnet_info, refill.work);
455 napi_disable(&vi->napi); 469 napi_disable(&vi->napi);
456 still_empty = !try_fill_recv(vi, GFP_KERNEL); 470 still_empty = !try_fill_recv(vi, GFP_KERNEL);
457 napi_enable(&vi->napi); 471 virtnet_napi_enable(vi);
458 472
459 /* In theory, this can happen: if we don't get any buffers in 473 /* In theory, this can happen: if we don't get any buffers in
460 * we will *never* try to fill again. */ 474 * we will *never* try to fill again. */
@@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev)
638{ 652{
639 struct virtnet_info *vi = netdev_priv(dev); 653 struct virtnet_info *vi = netdev_priv(dev);
640 654
641 napi_enable(&vi->napi); 655 virtnet_napi_enable(vi);
642
643 /* If all buffers were filled by other side before we napi_enabled, we
644 * won't get another interrupt, so process any outstanding packets
645 * now. virtnet_poll wants re-enable the queue, so we disable here.
646 * We synchronize against interrupts via NAPI_STATE_SCHED */
647 if (napi_schedule_prep(&vi->napi)) {
648 virtqueue_disable_cb(vi->rvq);
649 __napi_schedule(&vi->napi);
650 }
651 return 0; 656 return 0;
652} 657}
653 658
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 01c05f53e2f9..228d4f7a58af 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -3690,7 +3690,7 @@ __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3690 if (status != VXGE_HW_OK) 3690 if (status != VXGE_HW_OK)
3691 goto exit; 3691 goto exit;
3692 3692
3693 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || 3693 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
3694 (rts_table != 3694 (rts_table !=
3695 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) 3695 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3696 *data1 = 0; 3696 *data1 = 0;
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 0064be7ce5c9..21091c26a9a5 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -838,9 +838,9 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah)
838 for (i = 0; i < qmax; i++) { 838 for (i = 0; i < qmax; i++) {
839 err = ath5k_hw_stop_tx_dma(ah, i); 839 err = ath5k_hw_stop_tx_dma(ah, i);
840 /* -EINVAL -> queue inactive */ 840 /* -EINVAL -> queue inactive */
841 if (err != -EINVAL) 841 if (err && err != -EINVAL)
842 return err; 842 return err;
843 } 843 }
844 844
845 return err; 845 return 0;
846} 846}
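
Editor's note: the ath5k dma.c hunk above stops treating -EINVAL (an inactive queue) as fatal and returns 0 on success instead of whatever the final iteration left in err. Roughly the same control flow in a standalone sketch, with stop_tx_queue() as a stub for ath5k_hw_stop_tx_dma().

    #include <errno.h>
    #include <stdio.h>

    static int stop_tx_queue(int q)
    {
        if (q == 2)
            return -EINVAL;             /* inactive queue: not a real failure */
        return 0;
    }

    static int dma_stop_all(int qmax)
    {
        int i, err;

        for (i = 0; i < qmax; i++) {
            err = stop_tx_queue(i);
            if (err && err != -EINVAL)  /* only real errors abort */
                return err;
        }
        return 0;                       /* success, not the last err value */
    }

    int main(void)
    {
        printf("%d\n", dma_stop_all(4)); /* 0 even though queue 2 was inactive */
        return 0;
    }
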
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index e5f2b96a4c63..a702817daf72 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -86,7 +86,7 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
86 if (!ah->ah_bwmode) { 86 if (!ah->ah_bwmode) {
87 dur = ieee80211_generic_frame_duration(sc->hw, 87 dur = ieee80211_generic_frame_duration(sc->hw,
88 NULL, len, rate); 88 NULL, len, rate);
89 return dur; 89 return le16_to_cpu(dur);
90 } 90 }
91 91
92 bitrate = rate->bitrate; 92 bitrate = rate->bitrate;
@@ -265,8 +265,6 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
265 * what rate we should choose to TX ACKs. */ 265 * what rate we should choose to TX ACKs. */
266 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate); 266 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate);
267 267
268 tx_time = le16_to_cpu(tx_time);
269
270 ath5k_hw_reg_write(ah, tx_time, reg); 268 ath5k_hw_reg_write(ah, tx_time, reg);
271 269
272 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) 270 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
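
Editor's note: the ath5k pcu.c hunk above converts the little-endian duration exactly once, at the point where the value is produced, instead of a later blanket conversion. A portable stand-in for what le16_to_cpu() does (this is an illustration, not the kernel implementation).

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t le16_load(const uint8_t *p)
    {
        /* byte 0 is least significant regardless of host endianness */
        return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
    }

    int main(void)
    {
        uint8_t wire[2] = { 0x34, 0x12 };    /* 0x1234 stored little-endian */
        printf("0x%04x\n", le16_load(wire)); /* prints 0x1234 on any host */
        return 0;
    }
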
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index f8a7771faee2..f44c84ab5dce 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -426,9 +426,8 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
426 } 426 }
427 427
428 /* WAR for ASPM system hang */ 428 /* WAR for ASPM system hang */
429 if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) { 429 if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
430 val |= (AR_WA_BIT6 | AR_WA_BIT7); 430 val |= (AR_WA_BIT6 | AR_WA_BIT7);
431 }
432 431
433 if (AR_SREV_9285E_20(ah)) 432 if (AR_SREV_9285E_20(ah))
434 val |= AR_WA_BIT23; 433 val |= AR_WA_BIT23;
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 3681caf54282..23838e37d45f 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -218,6 +218,7 @@ struct ath_frame_info {
218struct ath_buf_state { 218struct ath_buf_state {
219 u8 bf_type; 219 u8 bf_type;
220 u8 bfs_paprd; 220 u8 bfs_paprd;
221 unsigned long bfs_paprd_timestamp;
221 enum ath9k_internal_frame_type bfs_ftype; 222 enum ath9k_internal_frame_type bfs_ftype;
222}; 223};
223 224
@@ -593,7 +594,6 @@ struct ath_softc {
593 struct work_struct paprd_work; 594 struct work_struct paprd_work;
594 struct work_struct hw_check_work; 595 struct work_struct hw_check_work;
595 struct completion paprd_complete; 596 struct completion paprd_complete;
596 bool paprd_pending;
597 597
598 u32 intrstatus; 598 u32 intrstatus;
599 u32 sc_flags; /* SC_OP_* */ 599 u32 sc_flags; /* SC_OP_* */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 38433f9bfe59..0352f0994caa 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -142,9 +142,6 @@ static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
142{ 142{
143 ath9k_htc_exit_debug(priv->ah); 143 ath9k_htc_exit_debug(priv->ah);
144 ath9k_hw_deinit(priv->ah); 144 ath9k_hw_deinit(priv->ah);
145 tasklet_kill(&priv->swba_tasklet);
146 tasklet_kill(&priv->rx_tasklet);
147 tasklet_kill(&priv->tx_tasklet);
148 kfree(priv->ah); 145 kfree(priv->ah);
149 priv->ah = NULL; 146 priv->ah = NULL;
150} 147}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f4d576bc3ccd..6bb59958f71e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1025,12 +1025,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1025 int ret = 0; 1025 int ret = 0;
1026 u8 cmd_rsp; 1026 u8 cmd_rsp;
1027 1027
1028 /* Cancel all the running timers/work .. */
1029 cancel_work_sync(&priv->fatal_work);
1030 cancel_work_sync(&priv->ps_work);
1031 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1032 ath9k_led_stop_brightness(priv);
1033
1034 mutex_lock(&priv->mutex); 1028 mutex_lock(&priv->mutex);
1035 1029
1036 if (priv->op_flags & OP_INVALID) { 1030 if (priv->op_flags & OP_INVALID) {
@@ -1044,8 +1038,23 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1044 WMI_CMD(WMI_DISABLE_INTR_CMDID); 1038 WMI_CMD(WMI_DISABLE_INTR_CMDID);
1045 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); 1039 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
1046 WMI_CMD(WMI_STOP_RECV_CMDID); 1040 WMI_CMD(WMI_STOP_RECV_CMDID);
1041
1042 tasklet_kill(&priv->swba_tasklet);
1043 tasklet_kill(&priv->rx_tasklet);
1044 tasklet_kill(&priv->tx_tasklet);
1045
1047 skb_queue_purge(&priv->tx_queue); 1046 skb_queue_purge(&priv->tx_queue);
1048 1047
1048 mutex_unlock(&priv->mutex);
1049
1050 /* Cancel all the running timers/work .. */
1051 cancel_work_sync(&priv->fatal_work);
1052 cancel_work_sync(&priv->ps_work);
1053 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1054 ath9k_led_stop_brightness(priv);
1055
1056 mutex_lock(&priv->mutex);
1057
1049 /* Remove monitor interface here */ 1058 /* Remove monitor interface here */
1050 if (ah->opmode == NL80211_IFTYPE_MONITOR) { 1059 if (ah->opmode == NL80211_IFTYPE_MONITOR) {
1051 if (ath9k_htc_remove_monitor_interface(priv)) 1060 if (ath9k_htc_remove_monitor_interface(priv))
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 767d8b86f1e1..087a6a95edd5 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -598,8 +598,6 @@ err_btcoex:
598err_queues: 598err_queues:
599 ath9k_hw_deinit(ah); 599 ath9k_hw_deinit(ah);
600err_hw: 600err_hw:
601 tasklet_kill(&sc->intr_tq);
602 tasklet_kill(&sc->bcon_tasklet);
603 601
604 kfree(ah); 602 kfree(ah);
605 sc->sc_ah = NULL; 603 sc->sc_ah = NULL;
@@ -807,9 +805,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
807 805
808 ath9k_hw_deinit(sc->sc_ah); 806 ath9k_hw_deinit(sc->sc_ah);
809 807
810 tasklet_kill(&sc->intr_tq);
811 tasklet_kill(&sc->bcon_tasklet);
812
813 kfree(sc->sc_ah); 808 kfree(sc->sc_ah);
814 sc->sc_ah = NULL; 809 sc->sc_ah = NULL;
815} 810}
@@ -824,6 +819,8 @@ void ath9k_deinit_device(struct ath_softc *sc)
824 wiphy_rfkill_stop_polling(sc->hw->wiphy); 819 wiphy_rfkill_stop_polling(sc->hw->wiphy);
825 ath_deinit_leds(sc); 820 ath_deinit_leds(sc);
826 821
822 ath9k_ps_restore(sc);
823
827 for (i = 0; i < sc->num_sec_wiphy; i++) { 824 for (i = 0; i < sc->num_sec_wiphy; i++) {
828 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 825 struct ath_wiphy *aphy = sc->sec_wiphy[i];
829 if (aphy == NULL) 826 if (aphy == NULL)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c79c97be6cd4..da5c64597c1f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -325,6 +325,8 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
325{ 325{
326 struct ieee80211_hw *hw = sc->hw; 326 struct ieee80211_hw *hw = sc->hw;
327 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 327 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
328 struct ath_hw *ah = sc->sc_ah;
329 struct ath_common *common = ath9k_hw_common(ah);
328 struct ath_tx_control txctl; 330 struct ath_tx_control txctl;
329 int time_left; 331 int time_left;
330 332
@@ -340,14 +342,16 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
340 tx_info->control.rates[1].idx = -1; 342 tx_info->control.rates[1].idx = -1;
341 343
342 init_completion(&sc->paprd_complete); 344 init_completion(&sc->paprd_complete);
343 sc->paprd_pending = true;
344 txctl.paprd = BIT(chain); 345 txctl.paprd = BIT(chain);
345 if (ath_tx_start(hw, skb, &txctl) != 0) 346
347 if (ath_tx_start(hw, skb, &txctl) != 0) {
348 ath_dbg(common, ATH_DBG_XMIT, "PAPRD TX failed\n");
349 dev_kfree_skb_any(skb);
346 return false; 350 return false;
351 }
347 352
348 time_left = wait_for_completion_timeout(&sc->paprd_complete, 353 time_left = wait_for_completion_timeout(&sc->paprd_complete,
349 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); 354 msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
350 sc->paprd_pending = false;
351 355
352 if (!time_left) 356 if (!time_left)
353 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE, 357 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE,
@@ -953,8 +957,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
953 957
954 spin_unlock_bh(&sc->sc_pcu_lock); 958 spin_unlock_bh(&sc->sc_pcu_lock);
955 ath9k_ps_restore(sc); 959 ath9k_ps_restore(sc);
956
957 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
958} 960}
959 961
960int ath_reset(struct ath_softc *sc, bool retry_tx) 962int ath_reset(struct ath_softc *sc, bool retry_tx)
@@ -1309,6 +1311,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1309 1311
1310 spin_lock_bh(&sc->sc_pcu_lock); 1312 spin_lock_bh(&sc->sc_pcu_lock);
1311 1313
1314 /* prevent tasklets to enable interrupts once we disable them */
1315 ah->imask &= ~ATH9K_INT_GLOBAL;
1316
1312 /* make sure h/w will not generate any interrupt 1317 /* make sure h/w will not generate any interrupt
1313 * before setting the invalid flag. */ 1318 * before setting the invalid flag. */
1314 ath9k_hw_disable_interrupts(ah); 1319 ath9k_hw_disable_interrupts(ah);
@@ -1326,6 +1331,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1326 1331
1327 spin_unlock_bh(&sc->sc_pcu_lock); 1332 spin_unlock_bh(&sc->sc_pcu_lock);
1328 1333
1334 /* we can now sync irq and kill any running tasklets, since we already
1335 * disabled interrupts and not holding a spin lock */
1336 synchronize_irq(sc->irq);
1337 tasklet_kill(&sc->intr_tq);
1338 tasklet_kill(&sc->bcon_tasklet);
1339
1329 ath9k_ps_restore(sc); 1340 ath9k_ps_restore(sc);
1330 1341
1331 sc->ps_idle = true; 1342 sc->ps_idle = true;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 33a37edbaf79..07b7804aec5b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1725,6 +1725,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1725 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc, 1725 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1726 bf->bf_state.bfs_paprd); 1726 bf->bf_state.bfs_paprd);
1727 1727
1728 if (txctl->paprd)
1729 bf->bf_state.bfs_paprd_timestamp = jiffies;
1730
1728 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head); 1731 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
1729 } 1732 }
1730 1733
@@ -1886,7 +1889,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1886 bf->bf_buf_addr = 0; 1889 bf->bf_buf_addr = 0;
1887 1890
1888 if (bf->bf_state.bfs_paprd) { 1891 if (bf->bf_state.bfs_paprd) {
1889 if (!sc->paprd_pending) 1892 if (time_after(jiffies,
1893 bf->bf_state.bfs_paprd_timestamp +
1894 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
1890 dev_kfree_skb_any(skb); 1895 dev_kfree_skb_any(skb);
1891 else 1896 else
1892 complete(&sc->paprd_complete); 1897 complete(&sc->paprd_complete);
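
Editor's note: the ath9k xmit.c hunks above replace the paprd_pending flag with a per-buffer jiffies timestamp compared via time_after(). The sketch below shows that wraparound-safe expiry test; the helper is essentially the kernel macro written out for illustration, with plain unsigned longs standing in for jiffies.

    #include <limits.h>
    #include <stdio.h>

    static int after(unsigned long a, unsigned long b)
    {
        /* essentially time_after(a, b): signed difference survives wraparound */
        return (long)(b - a) < 0;
    }

    int main(void)
    {
        unsigned long stamp = ULONG_MAX - 5;  /* timestamp taken just before wraparound */
        unsigned long timeout = 100;

        printf("%d\n", after(stamp + 50, stamp + timeout));  /* 0: not yet expired */
        printf("%d\n", after(stamp + 200, stamp + timeout)); /* 1: expired */
        return 0;
    }
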
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 939a0e96ed1f..84866a4b8350 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -564,7 +564,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
564 cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid); 564 cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
565 565
566 /* 2. Maybe the AP wants to send multicast/broadcast data? */ 566 /* 2. Maybe the AP wants to send multicast/broadcast data? */
567 cam = !!(tim_ie->bitmap_ctrl & 0x01); 567 cam |= !!(tim_ie->bitmap_ctrl & 0x01);
568 568
569 if (!cam) { 569 if (!cam) {
570 /* back to low-power land. */ 570 /* back to low-power land. */
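
Editor's note: the carl9170 hunk above is a one-character fix: the multicast/broadcast test must OR into cam rather than overwrite the result of the TIM lookup. A trivial sketch of why the distinction matters, with invented condition names.

    #include <stdio.h>

    static int stay_awake(int unicast_pending, int multicast_pending)
    {
        int cam;

        cam = unicast_pending;       /* 1. traffic addressed to this station? */
        cam |= multicast_pending;    /* 2. broadcast/multicast queued? plain '=' here was the bug */

        return cam;
    }

    int main(void)
    {
        printf("%d\n", stay_awake(1, 0)); /* 1; with '=' this would wrongly be 0 */
        return 0;
    }
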
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index a9b852be4509..39b6f16c87fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
402} 402}
403#endif 403#endif
404 404
405/**
406 * iwl3945_good_plcp_health - checks for plcp error.
407 *
408 * When the plcp error is exceeding the thresholds, reset the radio
409 * to improve the throughput.
410 */
411static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
412 struct iwl_rx_packet *pkt)
413{
414 bool rc = true;
415 struct iwl3945_notif_statistics current_stat;
416 int combined_plcp_delta;
417 unsigned int plcp_msec;
418 unsigned long plcp_received_jiffies;
419
420 if (priv->cfg->base_params->plcp_delta_threshold ==
421 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
422 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
423 return rc;
424 }
425 memcpy(&current_stat, pkt->u.raw, sizeof(struct
426 iwl3945_notif_statistics));
427 /*
428 * check for plcp_err and trigger radio reset if it exceeds
429 * the plcp error threshold plcp_delta.
430 */
431 plcp_received_jiffies = jiffies;
432 plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
433 (long) priv->plcp_jiffies);
434 priv->plcp_jiffies = plcp_received_jiffies;
435 /*
436 * check to make sure plcp_msec is not 0 to prevent division
437 * by zero.
438 */
439 if (plcp_msec) {
440 combined_plcp_delta =
441 (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
442 le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
443
444 if ((combined_plcp_delta > 0) &&
445 ((combined_plcp_delta * 100) / plcp_msec) >
446 priv->cfg->base_params->plcp_delta_threshold) {
447 /*
448 * if plcp_err exceed the threshold, the following
449 * data is printed in csv format:
450 * Text: plcp_err exceeded %d,
451 * Received ofdm.plcp_err,
452 * Current ofdm.plcp_err,
453 * combined_plcp_delta,
454 * plcp_msec
455 */
456 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
457 "%u, %d, %u mSecs\n",
458 priv->cfg->base_params->plcp_delta_threshold,
459 le32_to_cpu(current_stat.rx.ofdm.plcp_err),
460 combined_plcp_delta, plcp_msec);
461 /*
462 * Reset the RF radio due to the high plcp
463 * error rate
464 */
465 rc = false;
466 }
467 }
468 return rc;
469}
470
471void iwl3945_hw_rx_statistics(struct iwl_priv *priv, 405void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
472 struct iwl_rx_mem_buffer *rxb) 406 struct iwl_rx_mem_buffer *rxb)
473{ 407{
@@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = {
2734 .isr_ops = { 2668 .isr_ops = {
2735 .isr = iwl_isr_legacy, 2669 .isr = iwl_isr_legacy,
2736 }, 2670 },
2737 .check_plcp_health = iwl3945_good_plcp_health,
2738 2671
2739 .debugfs_ops = { 2672 .debugfs_ops = {
2740 .rx_stats_read = iwl3945_ucode_rx_stats_read, 2673 .rx_stats_read = iwl3945_ucode_rx_stats_read,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index af505bcd7ae0..ef36aff1bb43 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -681,6 +681,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
681 .fw_name_pre = IWL6050_FW_PRE, \ 681 .fw_name_pre = IWL6050_FW_PRE, \
682 .ucode_api_max = IWL6050_UCODE_API_MAX, \ 682 .ucode_api_max = IWL6050_UCODE_API_MAX, \
683 .ucode_api_min = IWL6050_UCODE_API_MIN, \ 683 .ucode_api_min = IWL6050_UCODE_API_MIN, \
684 .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
685 .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
684 .ops = &iwl6050_ops, \ 686 .ops = &iwl6050_ops, \
685 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ 687 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
686 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ 688 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 36335b1b54d4..c1cfd9952e52 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1157,6 +1157,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
1157 /* only Re-enable if disabled by irq */ 1157 /* only Re-enable if disabled by irq */
1158 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1158 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1159 iwl_enable_interrupts(priv); 1159 iwl_enable_interrupts(priv);
1160 /* Re-enable RF_KILL if it occurred */
1161 else if (handled & CSR_INT_BIT_RF_KILL)
1162 iwl_enable_rfkill_int(priv);
1160 1163
1161#ifdef CONFIG_IWLWIFI_DEBUG 1164#ifdef CONFIG_IWLWIFI_DEBUG
1162 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1165 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
@@ -1371,6 +1374,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1371 /* only Re-enable if disabled by irq */ 1374 /* only Re-enable if disabled by irq */
1372 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1375 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1373 iwl_enable_interrupts(priv); 1376 iwl_enable_interrupts(priv);
1377 /* Re-enable RF_KILL if it occurred */
1378 else if (handled & CSR_INT_BIT_RF_KILL)
1379 iwl_enable_rfkill_int(priv);
1374} 1380}
1375 1381
1376/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ 1382/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index b8433f3a9bc2..62876cd5c41a 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -726,9 +726,9 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
726} 726}
727 727
728static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, 728static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
729 u8 efuse_data, u8 offset, int *bcontinual, 729 u8 efuse_data, u8 offset, int *bcontinual,
730 u8 *write_state, struct pgpkt_struct target_pkt, 730 u8 *write_state, struct pgpkt_struct *target_pkt,
731 int *repeat_times, int *bresult, u8 word_en) 731 int *repeat_times, int *bresult, u8 word_en)
732{ 732{
733 struct rtl_priv *rtlpriv = rtl_priv(hw); 733 struct rtl_priv *rtlpriv = rtl_priv(hw);
734 struct pgpkt_struct tmp_pkt; 734 struct pgpkt_struct tmp_pkt;
@@ -744,8 +744,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
744 tmp_pkt.word_en = tmp_header & 0x0F; 744 tmp_pkt.word_en = tmp_header & 0x0F;
745 tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); 745 tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
746 746
747 if (tmp_pkt.offset != target_pkt.offset) { 747 if (tmp_pkt.offset != target_pkt->offset) {
748 efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1; 748 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
749 *write_state = PG_STATE_HEADER; 749 *write_state = PG_STATE_HEADER;
750 } else { 750 } else {
751 for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) { 751 for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
@@ -756,23 +756,23 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
756 } 756 }
757 757
758 if (bdataempty == false) { 758 if (bdataempty == false) {
759 efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1; 759 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
760 *write_state = PG_STATE_HEADER; 760 *write_state = PG_STATE_HEADER;
761 } else { 761 } else {
762 match_word_en = 0x0F; 762 match_word_en = 0x0F;
763 if (!((target_pkt.word_en & BIT(0)) | 763 if (!((target_pkt->word_en & BIT(0)) |
764 (tmp_pkt.word_en & BIT(0)))) 764 (tmp_pkt.word_en & BIT(0))))
765 match_word_en &= (~BIT(0)); 765 match_word_en &= (~BIT(0));
766 766
767 if (!((target_pkt.word_en & BIT(1)) | 767 if (!((target_pkt->word_en & BIT(1)) |
768 (tmp_pkt.word_en & BIT(1)))) 768 (tmp_pkt.word_en & BIT(1))))
769 match_word_en &= (~BIT(1)); 769 match_word_en &= (~BIT(1));
770 770
771 if (!((target_pkt.word_en & BIT(2)) | 771 if (!((target_pkt->word_en & BIT(2)) |
772 (tmp_pkt.word_en & BIT(2)))) 772 (tmp_pkt.word_en & BIT(2))))
773 match_word_en &= (~BIT(2)); 773 match_word_en &= (~BIT(2));
774 774
775 if (!((target_pkt.word_en & BIT(3)) | 775 if (!((target_pkt->word_en & BIT(3)) |
776 (tmp_pkt.word_en & BIT(3)))) 776 (tmp_pkt.word_en & BIT(3))))
777 match_word_en &= (~BIT(3)); 777 match_word_en &= (~BIT(3));
778 778
@@ -780,7 +780,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
780 badworden = efuse_word_enable_data_write( 780 badworden = efuse_word_enable_data_write(
781 hw, *efuse_addr + 1, 781 hw, *efuse_addr + 1,
782 tmp_pkt.word_en, 782 tmp_pkt.word_en,
783 target_pkt.data); 783 target_pkt->data);
784 784
785 if (0x0F != (badworden & 0x0F)) { 785 if (0x0F != (badworden & 0x0F)) {
786 u8 reorg_offset = offset; 786 u8 reorg_offset = offset;
@@ -791,26 +791,26 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
791 } 791 }
792 792
793 tmp_word_en = 0x0F; 793 tmp_word_en = 0x0F;
794 if ((target_pkt.word_en & BIT(0)) ^ 794 if ((target_pkt->word_en & BIT(0)) ^
795 (match_word_en & BIT(0))) 795 (match_word_en & BIT(0)))
796 tmp_word_en &= (~BIT(0)); 796 tmp_word_en &= (~BIT(0));
797 797
798 if ((target_pkt.word_en & BIT(1)) ^ 798 if ((target_pkt->word_en & BIT(1)) ^
799 (match_word_en & BIT(1))) 799 (match_word_en & BIT(1)))
800 tmp_word_en &= (~BIT(1)); 800 tmp_word_en &= (~BIT(1));
801 801
802 if ((target_pkt.word_en & BIT(2)) ^ 802 if ((target_pkt->word_en & BIT(2)) ^
803 (match_word_en & BIT(2))) 803 (match_word_en & BIT(2)))
804 tmp_word_en &= (~BIT(2)); 804 tmp_word_en &= (~BIT(2));
805 805
806 if ((target_pkt.word_en & BIT(3)) ^ 806 if ((target_pkt->word_en & BIT(3)) ^
807 (match_word_en & BIT(3))) 807 (match_word_en & BIT(3)))
808 tmp_word_en &= (~BIT(3)); 808 tmp_word_en &= (~BIT(3));
809 809
810 if ((tmp_word_en & 0x0F) != 0x0F) { 810 if ((tmp_word_en & 0x0F) != 0x0F) {
811 *efuse_addr = efuse_get_current_size(hw); 811 *efuse_addr = efuse_get_current_size(hw);
812 target_pkt.offset = offset; 812 target_pkt->offset = offset;
813 target_pkt.word_en = tmp_word_en; 813 target_pkt->word_en = tmp_word_en;
814 } else 814 } else
815 *bcontinual = false; 815 *bcontinual = false;
816 *write_state = PG_STATE_HEADER; 816 *write_state = PG_STATE_HEADER;
@@ -821,8 +821,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
821 } 821 }
822 } else { 822 } else {
823 *efuse_addr += (2 * tmp_word_cnts) + 1; 823 *efuse_addr += (2 * tmp_word_cnts) + 1;
824 target_pkt.offset = offset; 824 target_pkt->offset = offset;
825 target_pkt.word_en = word_en; 825 target_pkt->word_en = word_en;
826 *write_state = PG_STATE_HEADER; 826 *write_state = PG_STATE_HEADER;
827 } 827 }
828 } 828 }
@@ -938,7 +938,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
938 efuse_write_data_case1(hw, &efuse_addr, 938 efuse_write_data_case1(hw, &efuse_addr,
939 efuse_data, offset, 939 efuse_data, offset,
940 &bcontinual, 940 &bcontinual,
941 &write_state, target_pkt, 941 &write_state, &target_pkt,
942 &repeat_times, &bresult, 942 &repeat_times, &bresult,
943 word_en); 943 word_en);
944 else 944 else
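
Editor's note: the rtlwifi efuse hunks above switch target_pkt from pass-by-value to pass-by-pointer (and dereference efuse_addr before advancing it) so the updates become visible to the caller. A minimal sketch of the difference, with illustrative types and field names.

    #include <stdio.h>
    #include <stdint.h>

    struct pkt { uint8_t offset; uint8_t word_en; };

    /* broken: mutates a copy, the caller never sees the change */
    static void retarget_by_value(struct pkt p, uint8_t off)
    {
        p.offset = off;
    }

    /* fixed: mutates the caller's object through a pointer */
    static void retarget_by_ptr(struct pkt *p, uint8_t off)
    {
        p->offset = off;
    }

    int main(void)
    {
        struct pkt target = { .offset = 1, .word_en = 0x0f };

        retarget_by_value(target, 7);
        printf("by value: %u\n", target.offset); /* still 1 */

        retarget_by_ptr(&target, 7);
        printf("by ptr:   %u\n", target.offset); /* now 7 */
        return 0;
    }
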
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 012e1a4016fe..40372bac9482 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -1039,6 +1039,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1039 1039
1040 if (changed & BSS_CHANGED_BEACON) { 1040 if (changed & BSS_CHANGED_BEACON) {
1041 beacon = ieee80211_beacon_get(hw, vif); 1041 beacon = ieee80211_beacon_get(hw, vif);
1042 if (!beacon)
1043 goto out_sleep;
1044
1042 ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data, 1045 ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data,
1043 beacon->len); 1046 beacon->len);
1044 1047
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 46714910f98c..7145ea543783 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -110,9 +110,8 @@ static void wl1271_spi_reset(struct wl1271 *wl)
110 spi_message_add_tail(&t, &m); 110 spi_message_add_tail(&t, &m);
111 111
112 spi_sync(wl_to_spi(wl), &m); 112 spi_sync(wl_to_spi(wl), &m);
113 kfree(cmd);
114
115 wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); 113 wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
114 kfree(cmd);
116} 115}
117 116
118static void wl1271_spi_init(struct wl1271 *wl) 117static void wl1271_spi_init(struct wl1271 *wl)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 546de5749824..da1f12120346 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -120,6 +120,9 @@ struct netfront_info {
120 unsigned long rx_pfn_array[NET_RX_RING_SIZE]; 120 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
121 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; 121 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
122 struct mmu_update rx_mmu[NET_RX_RING_SIZE]; 122 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
123
124 /* Statistics */
125 int rx_gso_checksum_fixup;
123}; 126};
124 127
125struct netfront_rx_info { 128struct netfront_rx_info {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
770 return cons; 773 return cons;
771} 774}
772 775
773static int skb_checksum_setup(struct sk_buff *skb) 776static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
774{ 777{
775 struct iphdr *iph; 778 struct iphdr *iph;
776 unsigned char *th; 779 unsigned char *th;
777 int err = -EPROTO; 780 int err = -EPROTO;
781 int recalculate_partial_csum = 0;
782
783 /*
784 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
785 * peers can fail to set NETRXF_csum_blank when sending a GSO
786 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
787 * recalculate the partial checksum.
788 */
789 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
790 struct netfront_info *np = netdev_priv(dev);
791 np->rx_gso_checksum_fixup++;
792 skb->ip_summed = CHECKSUM_PARTIAL;
793 recalculate_partial_csum = 1;
794 }
795
796 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
797 if (skb->ip_summed != CHECKSUM_PARTIAL)
798 return 0;
778 799
779 if (skb->protocol != htons(ETH_P_IP)) 800 if (skb->protocol != htons(ETH_P_IP))
780 goto out; 801 goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
788 switch (iph->protocol) { 809 switch (iph->protocol) {
789 case IPPROTO_TCP: 810 case IPPROTO_TCP:
790 skb->csum_offset = offsetof(struct tcphdr, check); 811 skb->csum_offset = offsetof(struct tcphdr, check);
812
813 if (recalculate_partial_csum) {
814 struct tcphdr *tcph = (struct tcphdr *)th;
815 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
816 skb->len - iph->ihl*4,
817 IPPROTO_TCP, 0);
818 }
791 break; 819 break;
792 case IPPROTO_UDP: 820 case IPPROTO_UDP:
793 skb->csum_offset = offsetof(struct udphdr, check); 821 skb->csum_offset = offsetof(struct udphdr, check);
822
823 if (recalculate_partial_csum) {
824 struct udphdr *udph = (struct udphdr *)th;
825 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
826 skb->len - iph->ihl*4,
827 IPPROTO_UDP, 0);
828 }
794 break; 829 break;
795 default: 830 default:
796 if (net_ratelimit()) 831 if (net_ratelimit())
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
829 /* Ethernet work: Delayed to here as it peeks the header. */ 864 /* Ethernet work: Delayed to here as it peeks the header. */
830 skb->protocol = eth_type_trans(skb, dev); 865 skb->protocol = eth_type_trans(skb, dev);
831 866
832 if (skb->ip_summed == CHECKSUM_PARTIAL) { 867 if (checksum_setup(dev, skb)) {
833 if (skb_checksum_setup(skb)) { 868 kfree_skb(skb);
834 kfree_skb(skb); 869 packets_dropped++;
835 packets_dropped++; 870 dev->stats.rx_errors++;
836 dev->stats.rx_errors++; 871 continue;
837 continue;
838 }
839 } 872 }
840 873
841 dev->stats.rx_packets++; 874 dev->stats.rx_packets++;
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
1632 } 1665 }
1633} 1666}
1634 1667
1668static const struct xennet_stat {
1669 char name[ETH_GSTRING_LEN];
1670 u16 offset;
1671} xennet_stats[] = {
1672 {
1673 "rx_gso_checksum_fixup",
1674 offsetof(struct netfront_info, rx_gso_checksum_fixup)
1675 },
1676};
1677
1678static int xennet_get_sset_count(struct net_device *dev, int string_set)
1679{
1680 switch (string_set) {
1681 case ETH_SS_STATS:
1682 return ARRAY_SIZE(xennet_stats);
1683 default:
1684 return -EINVAL;
1685 }
1686}
1687
1688static void xennet_get_ethtool_stats(struct net_device *dev,
1689 struct ethtool_stats *stats, u64 * data)
1690{
1691 void *np = netdev_priv(dev);
1692 int i;
1693
1694 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1695 data[i] = *(int *)(np + xennet_stats[i].offset);
1696}
1697
1698static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
1699{
1700 int i;
1701
1702 switch (stringset) {
1703 case ETH_SS_STATS:
1704 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1705 memcpy(data + i * ETH_GSTRING_LEN,
1706 xennet_stats[i].name, ETH_GSTRING_LEN);
1707 break;
1708 }
1709}
1710
1635static const struct ethtool_ops xennet_ethtool_ops = 1711static const struct ethtool_ops xennet_ethtool_ops =
1636{ 1712{
1637 .set_tx_csum = ethtool_op_set_tx_csum, 1713 .set_tx_csum = ethtool_op_set_tx_csum,
1638 .set_sg = xennet_set_sg, 1714 .set_sg = xennet_set_sg,
1639 .set_tso = xennet_set_tso, 1715 .set_tso = xennet_set_tso,
1640 .get_link = ethtool_op_get_link, 1716 .get_link = ethtool_op_get_link,
1717
1718 .get_sset_count = xennet_get_sset_count,
1719 .get_ethtool_stats = xennet_get_ethtool_stats,
1720 .get_strings = xennet_get_strings,
1641}; 1721};
1642 1722
1643#ifdef CONFIG_SYSFS 1723#ifdef CONFIG_SYSFS
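
Editor's note: the xen-netfront hunks above add ethtool statistics driven by a table of {name, offsetof()} pairs that get_strings and get_ethtool_stats walk. A self-contained sketch of that table-walking pattern with an illustrative private struct; the second counter is invented for the example.

    #include <stddef.h>
    #include <stdio.h>

    struct priv_stats {
        int rx_gso_checksum_fixup;
        int rx_dropped;                 /* illustrative extra counter */
    };

    static const struct stat_desc {
        const char *name;
        size_t offset;
    } stats_table[] = {
        { "rx_gso_checksum_fixup", offsetof(struct priv_stats, rx_gso_checksum_fixup) },
        { "rx_dropped",            offsetof(struct priv_stats, rx_dropped) },
    };

    static void dump_stats(const struct priv_stats *priv)
    {
        const char *base = (const char *)priv;
        size_t i;

        for (i = 0; i < sizeof(stats_table) / sizeof(stats_table[0]); i++)
            printf("%s: %d\n", stats_table[i].name,
                   *(const int *)(base + stats_table[i].offset));
    }

    int main(void)
    {
        struct priv_stats s = { .rx_gso_checksum_fixup = 3, .rx_dropped = 1 };
        dump_stats(&s);
        return 0;
    }
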
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 8ecaac983923..ea25e5bfcf23 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -23,6 +23,7 @@
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <linux/capability.h> 25#include <linux/capability.h>
26#include <linux/security.h>
26#include <linux/pci-aspm.h> 27#include <linux/pci-aspm.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include "pci.h" 29#include "pci.h"
@@ -368,7 +369,7 @@ pci_read_config(struct file *filp, struct kobject *kobj,
368 u8 *data = (u8*) buf; 369 u8 *data = (u8*) buf;
369 370
370 /* Several chips lock up trying to read undefined config space */ 371 /* Several chips lock up trying to read undefined config space */
371 if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) { 372 if (security_capable(filp->f_cred, CAP_SYS_ADMIN) == 0) {
372 size = dev->cfg_size; 373 size = dev->cfg_size;
373 } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { 374 } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
374 size = 128; 375 size = 128;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d163bc2e2b9e..a59af5b24f0a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -227,7 +227,7 @@ config SONYPI_COMPAT
227config IDEAPAD_LAPTOP 227config IDEAPAD_LAPTOP
228 tristate "Lenovo IdeaPad Laptop Extras" 228 tristate "Lenovo IdeaPad Laptop Extras"
229 depends on ACPI 229 depends on ACPI
230 depends on RFKILL 230 depends on RFKILL && INPUT
231 select INPUT_SPARSEKMAP 231 select INPUT_SPARSEKMAP
232 help 232 help
233 This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. 233 This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c5c4b8c32eb8..38b34a73866a 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -84,7 +84,7 @@ MODULE_LICENSE("GPL");
84 */ 84 */
85#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" 85#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
86#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" 86#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C"
87#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3" 87#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
88#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" 88#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A"
89#define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" 89#define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
90 90
@@ -1280,7 +1280,7 @@ static ssize_t set_bool_threeg(struct device *dev,
1280 return -EINVAL; 1280 return -EINVAL;
1281 return count; 1281 return count;
1282} 1282}
1283static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg, 1283static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
1284 set_bool_threeg); 1284 set_bool_threeg);
1285 1285
1286static ssize_t show_interface(struct device *dev, struct device_attribute *attr, 1286static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index 4633fd8532cc..fe495939c307 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1081,14 +1081,8 @@ static int asus_hotk_add_fs(struct acpi_device *device)
1081 struct proc_dir_entry *proc; 1081 struct proc_dir_entry *proc;
1082 mode_t mode; 1082 mode_t mode;
1083 1083
1084 /*
1085 * If parameter uid or gid is not changed, keep the default setting for
1086 * our proc entries (-rw-rw-rw-) else, it means we care about security,
1087 * and then set to -rw-rw----
1088 */
1089
1090 if ((asus_uid == 0) && (asus_gid == 0)) { 1084 if ((asus_uid == 0) && (asus_gid == 0)) {
1091 mode = S_IFREG | S_IRUGO | S_IWUGO; 1085 mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
1092 } else { 1086 } else {
1093 mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP; 1087 mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
1094 printk(KERN_WARNING " asus_uid and asus_gid parameters are " 1088 printk(KERN_WARNING " asus_uid and asus_gid parameters are "
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 34657f96b5a5..ad24ef36f9f7 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -290,9 +290,12 @@ static int dell_rfkill_set(void *data, bool blocked)
290 dell_send_request(buffer, 17, 11); 290 dell_send_request(buffer, 17, 11);
291 291
292 /* If the hardware switch controls this radio, and the hardware 292 /* If the hardware switch controls this radio, and the hardware
293 switch is disabled, don't allow changing the software state */ 293 switch is disabled, don't allow changing the software state.
294 If the hardware switch is reported as not supported, always
295 fire the SMI to toggle the killswitch. */
294 if ((hwswitch_state & BIT(hwswitch_bit)) && 296 if ((hwswitch_state & BIT(hwswitch_bit)) &&
295 !(buffer->output[1] & BIT(16))) { 297 !(buffer->output[1] & BIT(16)) &&
298 (buffer->output[1] & BIT(0))) {
296 ret = -EINVAL; 299 ret = -EINVAL;
297 goto out; 300 goto out;
298 } 301 }
@@ -398,6 +401,23 @@ static const struct file_operations dell_debugfs_fops = {
398 401
399static void dell_update_rfkill(struct work_struct *ignored) 402static void dell_update_rfkill(struct work_struct *ignored)
400{ 403{
404 int status;
405
406 get_buffer();
407 dell_send_request(buffer, 17, 11);
408 status = buffer->output[1];
409 release_buffer();
410
411 /* if hardware rfkill is not supported, set it explicitly */
412 if (!(status & BIT(0))) {
413 if (wifi_rfkill)
414 dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17));
415 if (bluetooth_rfkill)
416 dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18));
417 if (wwan_rfkill)
418 dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19));
419 }
420
401 if (wifi_rfkill) 421 if (wifi_rfkill)
402 dell_rfkill_query(wifi_rfkill, (void *)1); 422 dell_rfkill_query(wifi_rfkill, (void *)1);
403 if (bluetooth_rfkill) 423 if (bluetooth_rfkill)
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 930e62762365..61433d492862 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -60,69 +60,20 @@ enum pmic_gpio_register {
60#define GPOSW_DOU 0x08 60#define GPOSW_DOU 0x08
61#define GPOSW_RDRV 0x30 61#define GPOSW_RDRV 0x30
62 62
63#define GPIO_UPDATE_TYPE 0x80000000
63 64
64#define NUM_GPIO 24 65#define NUM_GPIO 24
65 66
66struct pmic_gpio_irq {
67 spinlock_t lock;
68 u32 trigger[NUM_GPIO];
69 u32 dirty;
70 struct work_struct work;
71};
72
73
74struct pmic_gpio { 67struct pmic_gpio {
68 struct mutex buslock;
75 struct gpio_chip chip; 69 struct gpio_chip chip;
76 struct pmic_gpio_irq irqtypes;
77 void *gpiointr; 70 void *gpiointr;
78 int irq; 71 int irq;
79 unsigned irq_base; 72 unsigned irq_base;
73 unsigned int update_type;
74 u32 trigger_type;
80}; 75};
81 76
82static void pmic_program_irqtype(int gpio, int type)
83{
84 if (type & IRQ_TYPE_EDGE_RISING)
85 intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
86 else
87 intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
88
89 if (type & IRQ_TYPE_EDGE_FALLING)
90 intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
91 else
92 intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
93};
94
95static void pmic_irqtype_work(struct work_struct *work)
96{
97 struct pmic_gpio_irq *t =
98 container_of(work, struct pmic_gpio_irq, work);
99 unsigned long flags;
100 int i;
101 u16 type;
102
103 spin_lock_irqsave(&t->lock, flags);
104 /* As we drop the lock, we may need multiple scans if we race the
105 pmic_irq_type function */
106 while (t->dirty) {
107 /*
108 * For each pin that has the dirty bit set send an IPC
109 * message to configure the hardware via the PMIC
110 */
111 for (i = 0; i < NUM_GPIO; i++) {
112 if (!(t->dirty & (1 << i)))
113 continue;
114 t->dirty &= ~(1 << i);
115 /* We can't trust the array entry or dirty
116 once the lock is dropped */
117 type = t->trigger[i];
118 spin_unlock_irqrestore(&t->lock, flags);
119 pmic_program_irqtype(i, type);
120 spin_lock_irqsave(&t->lock, flags);
121 }
122 }
123 spin_unlock_irqrestore(&t->lock, flags);
124}
125
126static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 77static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
127{ 78{
128 if (offset > 8) { 79 if (offset > 8) {
@@ -190,25 +141,24 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
190 1 << (offset - 16)); 141 1 << (offset - 16));
191} 142}
192 143
193static int pmic_irq_type(unsigned irq, unsigned type) 144/*
145 * This is called from genirq with pg->buslock locked and
146 * irq_desc->lock held. We can not access the scu bus here, so we
147 * store the change and update in the bus_sync_unlock() function below
148 */
149static int pmic_irq_type(struct irq_data *data, unsigned type)
194{ 150{
195 struct pmic_gpio *pg = get_irq_chip_data(irq); 151 struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
196 u32 gpio = irq - pg->irq_base; 152 u32 gpio = data->irq - pg->irq_base;
197 unsigned long flags;
198 153
199 if (gpio >= pg->chip.ngpio) 154 if (gpio >= pg->chip.ngpio)
200 return -EINVAL; 155 return -EINVAL;
201 156
202 spin_lock_irqsave(&pg->irqtypes.lock, flags); 157 pg->trigger_type = type;
203 pg->irqtypes.trigger[gpio] = type; 158 pg->update_type = gpio | GPIO_UPDATE_TYPE;
204 pg->irqtypes.dirty |= (1 << gpio);
205 spin_unlock_irqrestore(&pg->irqtypes.lock, flags);
206 schedule_work(&pg->irqtypes.work);
207 return 0; 159 return 0;
208} 160}
209 161
210
211
212static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) 162static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
213{ 163{
214 struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip); 164 struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
@@ -217,38 +167,32 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
217} 167}
218 168
219/* the gpiointr register is read-clear, so just do nothing. */ 169/* the gpiointr register is read-clear, so just do nothing. */
220static void pmic_irq_unmask(unsigned irq) 170static void pmic_irq_unmask(struct irq_data *data) { }
221{
222};
223 171
224static void pmic_irq_mask(unsigned irq) 172static void pmic_irq_mask(struct irq_data *data) { }
225{
226};
227 173
228static struct irq_chip pmic_irqchip = { 174static struct irq_chip pmic_irqchip = {
229 .name = "PMIC-GPIO", 175 .name = "PMIC-GPIO",
230 .mask = pmic_irq_mask, 176 .irq_mask = pmic_irq_mask,
231 .unmask = pmic_irq_unmask, 177 .irq_unmask = pmic_irq_unmask,
232 .set_type = pmic_irq_type, 178 .irq_set_type = pmic_irq_type,
233}; 179};
234 180
235static void pmic_irq_handler(unsigned irq, struct irq_desc *desc) 181static irqreturn_t pmic_irq_handler(int irq, void *data)
236{ 182{
237 struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq); 183 struct pmic_gpio *pg = data;
238 u8 intsts = *((u8 *)pg->gpiointr + 4); 184 u8 intsts = *((u8 *)pg->gpiointr + 4);
239 int gpio; 185 int gpio;
186 irqreturn_t ret = IRQ_NONE;
240 187
241 for (gpio = 0; gpio < 8; gpio++) { 188 for (gpio = 0; gpio < 8; gpio++) {
242 if (intsts & (1 << gpio)) { 189 if (intsts & (1 << gpio)) {
243 pr_debug("pmic pin %d triggered\n", gpio); 190 pr_debug("pmic pin %d triggered\n", gpio);
244 generic_handle_irq(pg->irq_base + gpio); 191 generic_handle_irq(pg->irq_base + gpio);
192 ret = IRQ_HANDLED;
245 } 193 }
246 } 194 }
247 195 return ret;
248 if (desc->chip->irq_eoi)
249 desc->chip->irq_eoi(irq_get_irq_data(irq));
250 else
251 dev_warn(pg->chip.dev, "missing EOI handler for irq %d\n", irq);
252} 196}
253 197
254static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) 198static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
@@ -297,8 +241,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
297 pg->chip.can_sleep = 1; 241 pg->chip.can_sleep = 1;
298 pg->chip.dev = dev; 242 pg->chip.dev = dev;
299 243
300 INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work); 244 mutex_init(&pg->buslock);
301 spin_lock_init(&pg->irqtypes.lock);
302 245
303 pg->chip.dev = dev; 246 pg->chip.dev = dev;
304 retval = gpiochip_add(&pg->chip); 247 retval = gpiochip_add(&pg->chip);
@@ -306,8 +249,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
306 printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__); 249 printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
307 goto err; 250 goto err;
308 } 251 }
309 set_irq_data(pg->irq, pg); 252
310 set_irq_chained_handler(pg->irq, pmic_irq_handler); 253 retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
254 if (retval) {
255 printk(KERN_WARNING "pmic: Interrupt request failed\n");
256 goto err;
257 }
258
311 for (i = 0; i < 8; i++) { 259 for (i = 0; i < 8; i++) {
312 set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, 260 set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
313 handle_simple_irq, "demux"); 261 handle_simple_irq, "demux");
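The hunk above only records the pending change (pg->trigger_type, pg->update_type) because, as the new comment says, the SCU bus cannot be reached while irq_desc->lock is held; the actual programming happens in the driver's bus_sync_unlock() callback, which lies outside this hunk. A minimal sketch of that buslock pattern, with illustrative callback bodies that are not taken from this patch, could look like:

static void pmic_irq_buslock(struct irq_data *data)
{
	struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);

	mutex_lock(&pg->buslock);
}

static void pmic_irq_bus_sync_unlock(struct irq_data *data)
{
	struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);

	/* now that we may sleep, apply the update recorded by pmic_irq_type() */
	if (pg->update_type & GPIO_UPDATE_TYPE) {
		unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE;

		pmic_program_irqtype(gpio, pg->trigger_type);
		pg->update_type = 0;
	}
	mutex_unlock(&pg->buslock);
}

These would be wired up through the .irq_bus_lock and .irq_bus_sync_unlock members of pmic_irqchip; genirq then brackets irq_set_type() with them, which is why pmic_irq_type() itself no longer touches the hardware.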
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 1fe0f1feff71..865ef78d6f1a 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \
162 return -EINVAL; \ 162 return -EINVAL; \
163 return count; \ 163 return count; \
164} \ 164} \
165static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \ 165static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
166 show_bool_##value, set_bool_##value); 166 show_bool_##value, set_bool_##value);
167 167
168show_set_bool(wireless, TC1100_INSTANCE_WIRELESS); 168show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index dd599585c6a9..eb9922385ef8 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2275,16 +2275,12 @@ static void tpacpi_input_send_key(const unsigned int scancode)
2275 if (keycode != KEY_RESERVED) { 2275 if (keycode != KEY_RESERVED) {
2276 mutex_lock(&tpacpi_inputdev_send_mutex); 2276 mutex_lock(&tpacpi_inputdev_send_mutex);
2277 2277
2278 input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
2278 input_report_key(tpacpi_inputdev, keycode, 1); 2279 input_report_key(tpacpi_inputdev, keycode, 1);
2279 if (keycode == KEY_UNKNOWN)
2280 input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
2281 scancode);
2282 input_sync(tpacpi_inputdev); 2280 input_sync(tpacpi_inputdev);
2283 2281
2282 input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
2284 input_report_key(tpacpi_inputdev, keycode, 0); 2283 input_report_key(tpacpi_inputdev, keycode, 0);
2285 if (keycode == KEY_UNKNOWN)
2286 input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
2287 scancode);
2288 input_sync(tpacpi_inputdev); 2284 input_sync(tpacpi_inputdev);
2289 2285
2290 mutex_unlock(&tpacpi_inputdev_send_mutex); 2286 mutex_unlock(&tpacpi_inputdev_send_mutex);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index cdd97192dc69..4941cade319f 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -97,6 +97,18 @@ config RTC_INTF_DEV
97 97
98 If unsure, say Y. 98 If unsure, say Y.
99 99
100config RTC_INTF_DEV_UIE_EMUL
101 bool "RTC UIE emulation on dev interface"
102 depends on RTC_INTF_DEV
103 help
104 Provides an emulation for RTC_UIE if the underlying rtc chip
105 driver does not expose RTC_UIE ioctls. Those requests generate
106 once-per-second update interrupts, used for synchronization.
107
108 The emulation code will read the time from the hardware
109 clock several times per second; please enable this option
110 only if you know that you really need it.
111
100config RTC_DRV_TEST 112config RTC_DRV_TEST
101 tristate "Test driver/device" 113 tristate "Test driver/device"
102 help 114 help
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 9583cbcc6b79..c404b61386bf 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -143,6 +143,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
143 rtc->id = id; 143 rtc->id = id;
144 rtc->ops = ops; 144 rtc->ops = ops;
145 rtc->owner = owner; 145 rtc->owner = owner;
146 rtc->irq_freq = 1;
146 rtc->max_user_freq = 64; 147 rtc->max_user_freq = 64;
147 rtc->dev.parent = dev; 148 rtc->dev.parent = dev;
148 rtc->dev.class = rtc_class; 149 rtc->dev.class = rtc_class;
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 925006d33109..cb2f0728fd70 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -209,9 +209,8 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
209 } 209 }
210 210
211 if (err) 211 if (err)
212 return err; 212 /* nothing */;
213 213 else if (!rtc->ops)
214 if (!rtc->ops)
215 err = -ENODEV; 214 err = -ENODEV;
216 else if (!rtc->ops->alarm_irq_enable) 215 else if (!rtc->ops->alarm_irq_enable)
217 err = -EINVAL; 216 err = -EINVAL;
@@ -229,6 +228,12 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
229 if (err) 228 if (err)
230 return err; 229 return err;
231 230
231#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
232 if (enabled == 0 && rtc->uie_irq_active) {
233 mutex_unlock(&rtc->ops_lock);
234 return rtc_dev_update_irq_enable_emul(rtc, 0);
235 }
236#endif
232 /* make sure we're changing state */ 237 /* make sure we're changing state */
233 if (rtc->uie_rtctimer.enabled == enabled) 238 if (rtc->uie_rtctimer.enabled == enabled)
234 goto out; 239 goto out;
@@ -248,6 +253,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
248 253
249out: 254out:
250 mutex_unlock(&rtc->ops_lock); 255 mutex_unlock(&rtc->ops_lock);
256#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
257 /*
258 * Enable emulation if the driver did not provide
259 * the update_irq_enable function pointer or if returned
260 * -EINVAL to signal that it has been configured without
261 * interrupts or that are not available at the moment.
262 */
263 if (err == -EINVAL)
264 err = rtc_dev_update_irq_enable_emul(rtc, enabled);
265#endif
251 return err; 266 return err;
252 267
253} 268}
@@ -263,7 +278,7 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
263 * 278 *
264 * Triggers the registered irq_task function callback. 279 * Triggers the registered irq_task function callback.
265 */ 280 */
266static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) 281void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
267{ 282{
268 unsigned long flags; 283 unsigned long flags;
269 284
@@ -464,6 +479,9 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
464 int err = 0; 479 int err = 0;
465 unsigned long flags; 480 unsigned long flags;
466 481
482 if (freq <= 0)
483 return -EINVAL;
484
467 spin_lock_irqsave(&rtc->irq_task_lock, flags); 485 spin_lock_irqsave(&rtc->irq_task_lock, flags);
468 if (rtc->irq_task != NULL && task == NULL) 486 if (rtc->irq_task != NULL && task == NULL)
469 err = -EBUSY; 487 err = -EBUSY;
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index b2752b6e7a2f..e725d51e773d 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -134,36 +134,29 @@ static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
134 return ret; 134 return ret;
135} 135}
136 136
137static int at32_rtc_ioctl(struct device *dev, unsigned int cmd, 137static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
138 unsigned long arg)
139{ 138{
140 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); 139 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
141 int ret = 0; 140 int ret = 0;
142 141
143 spin_lock_irq(&rtc->lock); 142 spin_lock_irq(&rtc->lock);
144 143
145 switch (cmd) { 144 if (enabled) {
146 case RTC_AIE_ON:
147 if (rtc_readl(rtc, VAL) > rtc->alarm_time) { 145 if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
148 ret = -EINVAL; 146 ret = -EINVAL;
149 break; 147 goto out;
150 } 148 }
151 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) 149 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
152 | RTC_BIT(CTRL_TOPEN)); 150 | RTC_BIT(CTRL_TOPEN));
153 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); 151 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
154 rtc_writel(rtc, IER, RTC_BIT(IER_TOPI)); 152 rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
155 break; 153 } else {
156 case RTC_AIE_OFF:
157 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) 154 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
158 & ~RTC_BIT(CTRL_TOPEN)); 155 & ~RTC_BIT(CTRL_TOPEN));
159 rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI)); 156 rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
160 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); 157 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
161 break;
162 default:
163 ret = -ENOIOCTLCMD;
164 break;
165 } 158 }
166 159out:
167 spin_unlock_irq(&rtc->lock); 160 spin_unlock_irq(&rtc->lock);
168 161
169 return ret; 162 return ret;
@@ -195,11 +188,11 @@ static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
195} 188}
196 189
197static struct rtc_class_ops at32_rtc_ops = { 190static struct rtc_class_ops at32_rtc_ops = {
198 .ioctl = at32_rtc_ioctl,
199 .read_time = at32_rtc_readtime, 191 .read_time = at32_rtc_readtime,
200 .set_time = at32_rtc_settime, 192 .set_time = at32_rtc_settime,
201 .read_alarm = at32_rtc_readalarm, 193 .read_alarm = at32_rtc_readalarm,
202 .set_alarm = at32_rtc_setalarm, 194 .set_alarm = at32_rtc_setalarm,
195 .alarm_irq_enable = at32_rtc_alarm_irq_enable,
203}; 196};
204 197
205static int __init at32_rtc_probe(struct platform_device *pdev) 198static int __init at32_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index bc8bbca9a2e2..26d1cf5d19ae 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -195,13 +195,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
195 195
196 /* important: scrub old status before enabling IRQs */ 196 /* important: scrub old status before enabling IRQs */
197 switch (cmd) { 197 switch (cmd) {
198 case RTC_AIE_OFF: /* alarm off */
199 at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
200 break;
201 case RTC_AIE_ON: /* alarm on */
202 at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
203 at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
204 break;
205 case RTC_UIE_OFF: /* update off */ 198 case RTC_UIE_OFF: /* update off */
206 at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV); 199 at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
207 break; 200 break;
@@ -217,6 +210,18 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
217 return ret; 210 return ret;
218} 211}
219 212
213static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
214{
215 pr_debug("%s(): enabled=%08x\n", __func__, enabled);
216
217 if (enabled) {
218 at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
219 at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
220 } else
221 at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
222
223 return 0;
224}
220/* 225/*
221 * Provide additional RTC information in /proc/driver/rtc 226 * Provide additional RTC information in /proc/driver/rtc
222 */ 227 */
@@ -270,6 +275,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
270 .read_alarm = at91_rtc_readalarm, 275 .read_alarm = at91_rtc_readalarm,
271 .set_alarm = at91_rtc_setalarm, 276 .set_alarm = at91_rtc_setalarm,
272 .proc = at91_rtc_proc, 277 .proc = at91_rtc_proc,
278 .alarm_irq_enable = at91_rtc_alarm_irq_enable,
273}; 279};
274 280
275/* 281/*
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index f677e0710ca1..c36749e4c926 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -229,12 +229,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
229 dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr); 229 dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr);
230 230
231 switch (cmd) { 231 switch (cmd) {
232 case RTC_AIE_OFF: /* alarm off */
233 rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
234 break;
235 case RTC_AIE_ON: /* alarm on */
236 rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
237 break;
238 case RTC_UIE_OFF: /* update off */ 232 case RTC_UIE_OFF: /* update off */
239 rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); 233 rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
240 break; 234 break;
@@ -249,6 +243,19 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
249 return ret; 243 return ret;
250} 244}
251 245
246static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
247{
248 struct sam9_rtc *rtc = dev_get_drvdata(dev);
249 u32 mr = rtt_readl(rtc, MR);
250
251 dev_dbg(dev, "alarm_irq_enable: enabled=%08x, mr %08x\n", enabled, mr);
252 if (enabled)
253 rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
254 else
255 rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
256 return 0;
257}
258
252/* 259/*
253 * Provide additional RTC information in /proc/driver/rtc 260 * Provide additional RTC information in /proc/driver/rtc
254 */ 261 */
@@ -302,6 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
302 .read_alarm = at91_rtc_readalarm, 309 .read_alarm = at91_rtc_readalarm,
303 .set_alarm = at91_rtc_setalarm, 310 .set_alarm = at91_rtc_setalarm,
304 .proc = at91_rtc_proc, 311 .proc = at91_rtc_proc,
312 .alarm_irq_enable	= at91_rtc_alarm_irq_enable,
305}; 313};
306 314
307/* 315/*
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index b4b6087f2234..17971d93354d 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -259,15 +259,6 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar
259 bfin_rtc_int_clear(~RTC_ISTAT_SEC); 259 bfin_rtc_int_clear(~RTC_ISTAT_SEC);
260 break; 260 break;
261 261
262 case RTC_AIE_ON:
263 dev_dbg_stamp(dev);
264 bfin_rtc_int_set_alarm(rtc);
265 break;
266 case RTC_AIE_OFF:
267 dev_dbg_stamp(dev);
268 bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
269 break;
270
271 default: 262 default:
272 dev_dbg_stamp(dev); 263 dev_dbg_stamp(dev);
273 ret = -ENOIOCTLCMD; 264 ret = -ENOIOCTLCMD;
@@ -276,6 +267,17 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar
276 return ret; 267 return ret;
277} 268}
278 269
270static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
271{
272 struct bfin_rtc *rtc = dev_get_drvdata(dev);
273
274 dev_dbg_stamp(dev);
275 if (enabled)
276 bfin_rtc_int_set_alarm(rtc);
277 else
278		bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
		return 0;
279}
280
279static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) 281static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)
280{ 282{
281 struct bfin_rtc *rtc = dev_get_drvdata(dev); 283 struct bfin_rtc *rtc = dev_get_drvdata(dev);
@@ -362,6 +364,7 @@ static struct rtc_class_ops bfin_rtc_ops = {
362 .read_alarm = bfin_rtc_read_alarm, 364 .read_alarm = bfin_rtc_read_alarm,
363 .set_alarm = bfin_rtc_set_alarm, 365 .set_alarm = bfin_rtc_set_alarm,
364 .proc = bfin_rtc_proc, 366 .proc = bfin_rtc_proc,
367 .alarm_irq_enable = bfin_rtc_alarm_irq_enable,
365}; 368};
366 369
367static int __devinit bfin_rtc_probe(struct platform_device *pdev) 370static int __devinit bfin_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 212b16edafc0..d0e06edb14c5 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -46,6 +46,105 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
46 return err; 46 return err;
47} 47}
48 48
49#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
50/*
51 * Routine to poll RTC seconds field for change as often as possible,
52 * after the first RTC_UIE, use a timer to reduce polling
53 */
54static void rtc_uie_task(struct work_struct *work)
55{
56 struct rtc_device *rtc =
57 container_of(work, struct rtc_device, uie_task);
58 struct rtc_time tm;
59 int num = 0;
60 int err;
61
62 err = rtc_read_time(rtc, &tm);
63
64 spin_lock_irq(&rtc->irq_lock);
65 if (rtc->stop_uie_polling || err) {
66 rtc->uie_task_active = 0;
67 } else if (rtc->oldsecs != tm.tm_sec) {
68 num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
69 rtc->oldsecs = tm.tm_sec;
70 rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
71 rtc->uie_timer_active = 1;
72 rtc->uie_task_active = 0;
73 add_timer(&rtc->uie_timer);
74 } else if (schedule_work(&rtc->uie_task) == 0) {
75 rtc->uie_task_active = 0;
76 }
77 spin_unlock_irq(&rtc->irq_lock);
78 if (num)
79 rtc_handle_legacy_irq(rtc, num, RTC_UF);
80}
81static void rtc_uie_timer(unsigned long data)
82{
83 struct rtc_device *rtc = (struct rtc_device *)data;
84 unsigned long flags;
85
86 spin_lock_irqsave(&rtc->irq_lock, flags);
87 rtc->uie_timer_active = 0;
88 rtc->uie_task_active = 1;
89 if ((schedule_work(&rtc->uie_task) == 0))
90 rtc->uie_task_active = 0;
91 spin_unlock_irqrestore(&rtc->irq_lock, flags);
92}
93
94static int clear_uie(struct rtc_device *rtc)
95{
96 spin_lock_irq(&rtc->irq_lock);
97 if (rtc->uie_irq_active) {
98 rtc->stop_uie_polling = 1;
99 if (rtc->uie_timer_active) {
100 spin_unlock_irq(&rtc->irq_lock);
101 del_timer_sync(&rtc->uie_timer);
102 spin_lock_irq(&rtc->irq_lock);
103 rtc->uie_timer_active = 0;
104 }
105 if (rtc->uie_task_active) {
106 spin_unlock_irq(&rtc->irq_lock);
107 flush_scheduled_work();
108 spin_lock_irq(&rtc->irq_lock);
109 }
110 rtc->uie_irq_active = 0;
111 }
112 spin_unlock_irq(&rtc->irq_lock);
113 return 0;
114}
115
116static int set_uie(struct rtc_device *rtc)
117{
118 struct rtc_time tm;
119 int err;
120
121 err = rtc_read_time(rtc, &tm);
122 if (err)
123 return err;
124 spin_lock_irq(&rtc->irq_lock);
125 if (!rtc->uie_irq_active) {
126 rtc->uie_irq_active = 1;
127 rtc->stop_uie_polling = 0;
128 rtc->oldsecs = tm.tm_sec;
129 rtc->uie_task_active = 1;
130 if (schedule_work(&rtc->uie_task) == 0)
131 rtc->uie_task_active = 0;
132 }
133 rtc->irq_data = 0;
134 spin_unlock_irq(&rtc->irq_lock);
135 return 0;
136}
137
138int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
139{
140 if (enabled)
141 return set_uie(rtc);
142 else
143 return clear_uie(rtc);
144}
145EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
146
147#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
49 148
50static ssize_t 149static ssize_t
51rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 150rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -154,19 +253,7 @@ static long rtc_dev_ioctl(struct file *file,
154 if (err) 253 if (err)
155 goto done; 254 goto done;
156 255
157 /* try the driver's ioctl interface */ 256 /*
158 if (ops->ioctl) {
159 err = ops->ioctl(rtc->dev.parent, cmd, arg);
160 if (err != -ENOIOCTLCMD) {
161 mutex_unlock(&rtc->ops_lock);
162 return err;
163 }
164 }
165
166 /* if the driver does not provide the ioctl interface
167 * or if that particular ioctl was not implemented
168 * (-ENOIOCTLCMD), we will try to emulate here.
169 *
170 * Drivers *SHOULD NOT* provide ioctl implementations 257 * Drivers *SHOULD NOT* provide ioctl implementations
171 * for these requests. Instead, provide methods to 258 * for these requests. Instead, provide methods to
172 * support the following code, so that the RTC's main 259 * support the following code, so that the RTC's main
@@ -329,7 +416,12 @@ static long rtc_dev_ioctl(struct file *file,
329 return err; 416 return err;
330 417
331 default: 418 default:
332 err = -ENOTTY; 419 /* Finally try the driver's ioctl interface */
420 if (ops->ioctl) {
421 err = ops->ioctl(rtc->dev.parent, cmd, arg);
422 if (err == -ENOIOCTLCMD)
423 err = -ENOTTY;
424 }
333 break; 425 break;
334 } 426 }
335 427
@@ -394,6 +486,11 @@ void rtc_dev_prepare(struct rtc_device *rtc)
394 486
395 rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); 487 rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
396 488
489#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
490 INIT_WORK(&rtc->uie_task, rtc_uie_task);
491 setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
492#endif
493
397 cdev_init(&rtc->char_dev, &rtc_dev_fops); 494 cdev_init(&rtc->char_dev, &rtc_dev_fops);
398 rtc->char_dev.owner = rtc->owner; 495 rtc->char_dev.owner = rtc->owner;
399} 496}
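For reference, the UIE emulation added above is consumed through the normal /dev/rtcN character device; a minimal user-space sketch (not part of this patch, shown only to illustrate the interface the emulation backs) that enables update interrupts and waits for one:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned long data;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, RTC_UIE_ON, 0) < 0)	/* emulated when the driver has no real UIE */
		return 1;
	if (read(fd, &data, sizeof(data)) == sizeof(data))	/* blocks until the next update */
		printf("irq count %lu, flags 0x%lx\n", data >> 8, data & 0xff);
	ioctl(fd, RTC_UIE_OFF, 0);
	close(fd);
	return 0;
}

The low byte returned by read() carries the interrupt flags (RTC_UF here) and the remaining bytes the number of interrupts since the last read, so the same loop works whether the update interrupt is real or emulated.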
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index bf430f9091ed..60ce69600828 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -40,6 +40,26 @@ static inline void ds1286_rtc_write(struct ds1286_priv *priv, u8 data, int reg)
40 __raw_writel(data, &priv->rtcregs[reg]); 40 __raw_writel(data, &priv->rtcregs[reg]);
41} 41}
42 42
43
44static int ds1286_alarm_irq_enable(struct device *dev, unsigned int enabled)
45{
46 struct ds1286_priv *priv = dev_get_drvdata(dev);
47 unsigned long flags;
48 unsigned char val;
49
50 /* Allow or mask alarm interrupts */
51 spin_lock_irqsave(&priv->lock, flags);
52 val = ds1286_rtc_read(priv, RTC_CMD);
53 if (enabled)
54 val &= ~RTC_TDM;
55 else
56 val |= RTC_TDM;
57 ds1286_rtc_write(priv, val, RTC_CMD);
58 spin_unlock_irqrestore(&priv->lock, flags);
59
60 return 0;
61}
62
43#ifdef CONFIG_RTC_INTF_DEV 63#ifdef CONFIG_RTC_INTF_DEV
44 64
45static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 65static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
@@ -49,22 +69,6 @@ static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
49 unsigned char val; 69 unsigned char val;
50 70
51 switch (cmd) { 71 switch (cmd) {
52 case RTC_AIE_OFF:
53 /* Mask alarm int. enab. bit */
54 spin_lock_irqsave(&priv->lock, flags);
55 val = ds1286_rtc_read(priv, RTC_CMD);
56 val |= RTC_TDM;
57 ds1286_rtc_write(priv, val, RTC_CMD);
58 spin_unlock_irqrestore(&priv->lock, flags);
59 break;
60 case RTC_AIE_ON:
61 /* Allow alarm interrupts. */
62 spin_lock_irqsave(&priv->lock, flags);
63 val = ds1286_rtc_read(priv, RTC_CMD);
64 val &= ~RTC_TDM;
65 ds1286_rtc_write(priv, val, RTC_CMD);
66 spin_unlock_irqrestore(&priv->lock, flags);
67 break;
68 case RTC_WIE_OFF: 72 case RTC_WIE_OFF:
69 /* Mask watchdog int. enab. bit */ 73 /* Mask watchdog int. enab. bit */
70 spin_lock_irqsave(&priv->lock, flags); 74 spin_lock_irqsave(&priv->lock, flags);
@@ -316,12 +320,13 @@ static int ds1286_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
316} 320}
317 321
318static const struct rtc_class_ops ds1286_ops = { 322static const struct rtc_class_ops ds1286_ops = {
319 .ioctl = ds1286_ioctl, 323 .ioctl = ds1286_ioctl,
320 .proc = ds1286_proc, 324 .proc = ds1286_proc,
321 .read_time = ds1286_read_time, 325 .read_time = ds1286_read_time,
322 .set_time = ds1286_set_time, 326 .set_time = ds1286_set_time,
323 .read_alarm = ds1286_read_alarm, 327 .read_alarm = ds1286_read_alarm,
324 .set_alarm = ds1286_set_alarm, 328 .set_alarm = ds1286_set_alarm,
329 .alarm_irq_enable = ds1286_alarm_irq_enable,
325}; 330};
326 331
327static int __devinit ds1286_probe(struct platform_device *pdev) 332static int __devinit ds1286_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 077af1d7b9e4..57fbcc149ba7 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -139,49 +139,32 @@ static u8 hour2bcd(bool hr12, int hour)
139 * Interface to RTC framework 139 * Interface to RTC framework
140 */ 140 */
141 141
142#ifdef CONFIG_RTC_INTF_DEV 142static int ds1305_alarm_irq_enable(struct device *dev, unsigned int enabled)
143
144/*
145 * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl)
146 */
147static int ds1305_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
148{ 143{
149 struct ds1305 *ds1305 = dev_get_drvdata(dev); 144 struct ds1305 *ds1305 = dev_get_drvdata(dev);
150 u8 buf[2]; 145 u8 buf[2];
151 int status = -ENOIOCTLCMD; 146 long err = -EINVAL;
152 147
153 buf[0] = DS1305_WRITE | DS1305_CONTROL; 148 buf[0] = DS1305_WRITE | DS1305_CONTROL;
154 buf[1] = ds1305->ctrl[0]; 149 buf[1] = ds1305->ctrl[0];
155 150
156 switch (cmd) { 151 if (enabled) {
157 case RTC_AIE_OFF:
158 status = 0;
159 if (!(buf[1] & DS1305_AEI0))
160 goto done;
161 buf[1] &= ~DS1305_AEI0;
162 break;
163
164 case RTC_AIE_ON:
165 status = 0;
166 if (ds1305->ctrl[0] & DS1305_AEI0) 152 if (ds1305->ctrl[0] & DS1305_AEI0)
167 goto done; 153 goto done;
168 buf[1] |= DS1305_AEI0; 154 buf[1] |= DS1305_AEI0;
169 break; 155 } else {
170 } 156 if (!(buf[1] & DS1305_AEI0))
171 if (status == 0) { 157 goto done;
172 status = spi_write_then_read(ds1305->spi, buf, sizeof buf, 158 buf[1] &= ~DS1305_AEI0;
173 NULL, 0);
174 if (status >= 0)
175 ds1305->ctrl[0] = buf[1];
176 } 159 }
177 160 err = spi_write_then_read(ds1305->spi, buf, sizeof buf, NULL, 0);
161 if (err >= 0)
162 ds1305->ctrl[0] = buf[1];
178done: 163done:
179 return status; 164 return err;
165
180} 166}
181 167
182#else
183#define ds1305_ioctl NULL
184#endif
185 168
186/* 169/*
187 * Get/set of date and time is pretty normal. 170 * Get/set of date and time is pretty normal.
@@ -460,12 +443,12 @@ done:
460#endif 443#endif
461 444
462static const struct rtc_class_ops ds1305_ops = { 445static const struct rtc_class_ops ds1305_ops = {
463 .ioctl = ds1305_ioctl,
464 .read_time = ds1305_get_time, 446 .read_time = ds1305_get_time,
465 .set_time = ds1305_set_time, 447 .set_time = ds1305_set_time,
466 .read_alarm = ds1305_get_alarm, 448 .read_alarm = ds1305_get_alarm,
467 .set_alarm = ds1305_set_alarm, 449 .set_alarm = ds1305_set_alarm,
468 .proc = ds1305_proc, 450 .proc = ds1305_proc,
451 .alarm_irq_enable = ds1305_alarm_irq_enable,
469}; 452};
470 453
471static void ds1305_work(struct work_struct *work) 454static void ds1305_work(struct work_struct *work)
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 0d559b6416dd..4724ba3acf1a 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -495,50 +495,27 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
495 return 0; 495 return 0;
496} 496}
497 497
498static int ds1307_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 498static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled)
499{ 499{
500 struct i2c_client *client = to_i2c_client(dev); 500 struct i2c_client *client = to_i2c_client(dev);
501 struct ds1307 *ds1307 = i2c_get_clientdata(client); 501 struct ds1307 *ds1307 = i2c_get_clientdata(client);
502 int ret; 502 int ret;
503 503
504 switch (cmd) { 504 if (!test_bit(HAS_ALARM, &ds1307->flags))
505 case RTC_AIE_OFF: 505 return -ENOTTY;
506 if (!test_bit(HAS_ALARM, &ds1307->flags))
507 return -ENOTTY;
508
509 ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
510 if (ret < 0)
511 return ret;
512
513 ret &= ~DS1337_BIT_A1IE;
514
515 ret = i2c_smbus_write_byte_data(client,
516 DS1337_REG_CONTROL, ret);
517 if (ret < 0)
518 return ret;
519
520 break;
521
522 case RTC_AIE_ON:
523 if (!test_bit(HAS_ALARM, &ds1307->flags))
524 return -ENOTTY;
525 506
526 ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); 507 ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
527 if (ret < 0) 508 if (ret < 0)
528 return ret; 509 return ret;
529 510
511 if (enabled)
530 ret |= DS1337_BIT_A1IE; 512 ret |= DS1337_BIT_A1IE;
513 else
514 ret &= ~DS1337_BIT_A1IE;
531 515
532 ret = i2c_smbus_write_byte_data(client, 516 ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, ret);
533 DS1337_REG_CONTROL, ret); 517 if (ret < 0)
534 if (ret < 0) 518 return ret;
535 return ret;
536
537 break;
538
539 default:
540 return -ENOIOCTLCMD;
541 }
542 519
543 return 0; 520 return 0;
544} 521}
@@ -548,7 +525,7 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
548 .set_time = ds1307_set_time, 525 .set_time = ds1307_set_time,
549 .read_alarm = ds1337_read_alarm, 526 .read_alarm = ds1337_read_alarm,
550 .set_alarm = ds1337_set_alarm, 527 .set_alarm = ds1337_set_alarm,
551 .ioctl = ds1307_ioctl, 528 .alarm_irq_enable = ds1307_alarm_irq_enable,
552}; 529};
553 530
554/*----------------------------------------------------------------------*/ 531/*----------------------------------------------------------------------*/
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 47fb6357c346..d834a63ec4b0 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -307,42 +307,25 @@ unlock:
307 mutex_unlock(&ds1374->mutex); 307 mutex_unlock(&ds1374->mutex);
308} 308}
309 309
310static int ds1374_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 310static int ds1374_alarm_irq_enable(struct device *dev, unsigned int enabled)
311{ 311{
312 struct i2c_client *client = to_i2c_client(dev); 312 struct i2c_client *client = to_i2c_client(dev);
313 struct ds1374 *ds1374 = i2c_get_clientdata(client); 313 struct ds1374 *ds1374 = i2c_get_clientdata(client);
314 int ret = -ENOIOCTLCMD; 314 int ret;
315 315
316 mutex_lock(&ds1374->mutex); 316 mutex_lock(&ds1374->mutex);
317 317
318 switch (cmd) { 318 ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
319 case RTC_AIE_OFF: 319 if (ret < 0)
320 ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR); 320 goto out;
321 if (ret < 0)
322 goto out;
323
324 ret &= ~DS1374_REG_CR_WACE;
325
326 ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
327 if (ret < 0)
328 goto out;
329
330 break;
331
332 case RTC_AIE_ON:
333 ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
334 if (ret < 0)
335 goto out;
336 321
322 if (enabled) {
337 ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE; 323 ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE;
338 ret &= ~DS1374_REG_CR_WDALM; 324 ret &= ~DS1374_REG_CR_WDALM;
339 325 } else {
340 ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret); 326 ret &= ~DS1374_REG_CR_WACE;
341 if (ret < 0)
342 goto out;
343
344 break;
345 } 327 }
328 ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
346 329
347out: 330out:
348 mutex_unlock(&ds1374->mutex); 331 mutex_unlock(&ds1374->mutex);
@@ -354,7 +337,7 @@ static const struct rtc_class_ops ds1374_rtc_ops = {
354 .set_time = ds1374_set_time, 337 .set_time = ds1374_set_time,
355 .read_alarm = ds1374_read_alarm, 338 .read_alarm = ds1374_read_alarm,
356 .set_alarm = ds1374_set_alarm, 339 .set_alarm = ds1374_set_alarm,
357 .ioctl = ds1374_ioctl, 340 .alarm_irq_enable = ds1374_alarm_irq_enable,
358}; 341};
359 342
360static int ds1374_probe(struct i2c_client *client, 343static int ds1374_probe(struct i2c_client *client,
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 5a8daa358066..69fe664a2228 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -213,41 +213,27 @@ static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
213 return m41t80_set_datetime(to_i2c_client(dev), tm); 213 return m41t80_set_datetime(to_i2c_client(dev), tm);
214} 214}
215 215
216#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE) 216static int m41t80_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
217static int
218m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
219{ 217{
220 struct i2c_client *client = to_i2c_client(dev); 218 struct i2c_client *client = to_i2c_client(dev);
221 int rc; 219 int rc;
222 220
223 switch (cmd) {
224 case RTC_AIE_OFF:
225 case RTC_AIE_ON:
226 break;
227 default:
228 return -ENOIOCTLCMD;
229 }
230
231 rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); 221 rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
232 if (rc < 0) 222 if (rc < 0)
233 goto err; 223 goto err;
234 switch (cmd) { 224
235 case RTC_AIE_OFF: 225 if (enabled)
236 rc &= ~M41T80_ALMON_AFE;
237 break;
238 case RTC_AIE_ON:
239 rc |= M41T80_ALMON_AFE; 226 rc |= M41T80_ALMON_AFE;
240 break; 227 else
241 } 228 rc &= ~M41T80_ALMON_AFE;
229
242 if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0) 230 if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0)
243 goto err; 231 goto err;
232
244 return 0; 233 return 0;
245err: 234err:
246 return -EIO; 235 return -EIO;
247} 236}
248#else
249#define m41t80_rtc_ioctl NULL
250#endif
251 237
252static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t) 238static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
253{ 239{
@@ -374,7 +360,7 @@ static struct rtc_class_ops m41t80_rtc_ops = {
374 .read_alarm = m41t80_rtc_read_alarm, 360 .read_alarm = m41t80_rtc_read_alarm,
375 .set_alarm = m41t80_rtc_set_alarm, 361 .set_alarm = m41t80_rtc_set_alarm,
376 .proc = m41t80_rtc_proc, 362 .proc = m41t80_rtc_proc,
377 .ioctl = m41t80_rtc_ioctl, 363 .alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
378}; 364};
379 365
380#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) 366#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index a99a0b554eb8..3978f4caf724 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -263,30 +263,21 @@ static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
263/* 263/*
264 * Handle commands from user-space 264 * Handle commands from user-space
265 */ 265 */
266static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd, 266static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
267 unsigned long arg)
268{ 267{
269 struct platform_device *pdev = to_platform_device(dev); 268 struct platform_device *pdev = to_platform_device(dev);
270 struct m48t59_plat_data *pdata = pdev->dev.platform_data; 269 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
271 struct m48t59_private *m48t59 = platform_get_drvdata(pdev); 270 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
272 unsigned long flags; 271 unsigned long flags;
273 int ret = 0;
274 272
275 spin_lock_irqsave(&m48t59->lock, flags); 273 spin_lock_irqsave(&m48t59->lock, flags);
276 switch (cmd) { 274 if (enabled)
277 case RTC_AIE_OFF: /* alarm interrupt off */
278 M48T59_WRITE(0x00, M48T59_INTR);
279 break;
280 case RTC_AIE_ON: /* alarm interrupt on */
281 M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR); 275 M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR);
282 break; 276 else
283 default: 277 M48T59_WRITE(0x00, M48T59_INTR);
284 ret = -ENOIOCTLCMD;
285 break;
286 }
287 spin_unlock_irqrestore(&m48t59->lock, flags); 278 spin_unlock_irqrestore(&m48t59->lock, flags);
288 279
289 return ret; 280 return 0;
290} 281}
291 282
292static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq) 283static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
@@ -330,12 +321,12 @@ static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id)
330} 321}
331 322
332static const struct rtc_class_ops m48t59_rtc_ops = { 323static const struct rtc_class_ops m48t59_rtc_ops = {
333 .ioctl = m48t59_rtc_ioctl,
334 .read_time = m48t59_rtc_read_time, 324 .read_time = m48t59_rtc_read_time,
335 .set_time = m48t59_rtc_set_time, 325 .set_time = m48t59_rtc_set_time,
336 .read_alarm = m48t59_rtc_readalarm, 326 .read_alarm = m48t59_rtc_readalarm,
337 .set_alarm = m48t59_rtc_setalarm, 327 .set_alarm = m48t59_rtc_setalarm,
338 .proc = m48t59_rtc_proc, 328 .proc = m48t59_rtc_proc,
329 .alarm_irq_enable = m48t59_rtc_alarm_irq_enable,
339}; 330};
340 331
341static const struct rtc_class_ops m48t02_rtc_ops = { 332static const struct rtc_class_ops m48t02_rtc_ops = {
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index bcd0cf63eb16..1db62db8469d 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -255,42 +255,21 @@ static int mrst_irq_set_state(struct device *dev, int enabled)
255 return 0; 255 return 0;
256} 256}
257 257
258#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
259
260/* Currently, the vRTC doesn't support UIE ON/OFF */ 258/* Currently, the vRTC doesn't support UIE ON/OFF */
261static int 259static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
262mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
263{ 260{
264 struct mrst_rtc *mrst = dev_get_drvdata(dev); 261 struct mrst_rtc *mrst = dev_get_drvdata(dev);
265 unsigned long flags; 262 unsigned long flags;
266 263
267 switch (cmd) {
268 case RTC_AIE_OFF:
269 case RTC_AIE_ON:
270 if (!mrst->irq)
271 return -EINVAL;
272 break;
273 default:
274 /* PIE ON/OFF is handled by mrst_irq_set_state() */
275 return -ENOIOCTLCMD;
276 }
277
278 spin_lock_irqsave(&rtc_lock, flags); 264 spin_lock_irqsave(&rtc_lock, flags);
279 switch (cmd) { 265 if (enabled)
280 case RTC_AIE_OFF: /* alarm off */
281 mrst_irq_disable(mrst, RTC_AIE);
282 break;
283 case RTC_AIE_ON: /* alarm on */
284 mrst_irq_enable(mrst, RTC_AIE); 266 mrst_irq_enable(mrst, RTC_AIE);
285 break; 267 else
286 } 268 mrst_irq_disable(mrst, RTC_AIE);
287 spin_unlock_irqrestore(&rtc_lock, flags); 269 spin_unlock_irqrestore(&rtc_lock, flags);
288 return 0; 270 return 0;
289} 271}
290 272
291#else
292#define mrst_rtc_ioctl NULL
293#endif
294 273
295#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE) 274#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
296 275
@@ -317,13 +296,13 @@ static int mrst_procfs(struct device *dev, struct seq_file *seq)
317#endif 296#endif
318 297
319static const struct rtc_class_ops mrst_rtc_ops = { 298static const struct rtc_class_ops mrst_rtc_ops = {
320 .ioctl = mrst_rtc_ioctl,
321 .read_time = mrst_read_time, 299 .read_time = mrst_read_time,
322 .set_time = mrst_set_time, 300 .set_time = mrst_set_time,
323 .read_alarm = mrst_read_alarm, 301 .read_alarm = mrst_read_alarm,
324 .set_alarm = mrst_set_alarm, 302 .set_alarm = mrst_set_alarm,
325 .proc = mrst_procfs, 303 .proc = mrst_procfs,
326 .irq_set_state = mrst_irq_set_state, 304 .irq_set_state = mrst_irq_set_state,
305 .alarm_irq_enable = mrst_rtc_alarm_irq_enable,
327}; 306};
328 307
329static struct mrst_rtc mrst_rtc; 308static struct mrst_rtc mrst_rtc;
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index b2fff0ca49f8..67820626e18f 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -82,7 +82,7 @@ static inline unsigned int msm6242_read(struct msm6242_priv *priv,
82static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val, 82static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val,
83 unsigned int reg) 83 unsigned int reg)
84{ 84{
85 return __raw_writel(val, &priv->regs[reg]); 85 __raw_writel(val, &priv->regs[reg]);
86} 86}
87 87
88static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val, 88static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val,
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index bcca47298554..60627a764514 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -169,25 +169,19 @@ static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
169 return 0; 169 return 0;
170} 170}
171 171
172static int mv_rtc_ioctl(struct device *dev, unsigned int cmd, 172static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
173 unsigned long arg)
174{ 173{
175 struct platform_device *pdev = to_platform_device(dev); 174 struct platform_device *pdev = to_platform_device(dev);
176 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 175 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
177 void __iomem *ioaddr = pdata->ioaddr; 176 void __iomem *ioaddr = pdata->ioaddr;
178 177
179 if (pdata->irq < 0) 178 if (pdata->irq < 0)
180 return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */ 179 return -EINVAL; /* fall back into rtc-dev's emulation */
181 switch (cmd) { 180
182 case RTC_AIE_OFF: 181 if (enabled)
183 writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
184 break;
185 case RTC_AIE_ON:
186 writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); 182 writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
187 break; 183 else
188 default: 184 writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
189 return -ENOIOCTLCMD;
190 }
191 return 0; 185 return 0;
192} 186}
193 187
@@ -216,7 +210,7 @@ static const struct rtc_class_ops mv_rtc_alarm_ops = {
216 .set_time = mv_rtc_set_time, 210 .set_time = mv_rtc_set_time,
217 .read_alarm = mv_rtc_read_alarm, 211 .read_alarm = mv_rtc_read_alarm,
218 .set_alarm = mv_rtc_set_alarm, 212 .set_alarm = mv_rtc_set_alarm,
219 .ioctl = mv_rtc_ioctl, 213 .alarm_irq_enable = mv_rtc_alarm_irq_enable,
220}; 214};
221 215
222static int __devinit mv_rtc_probe(struct platform_device *pdev) 216static int __devinit mv_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index e72b523c79a5..b4dbf3a319b3 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -143,8 +143,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
143 u8 reg; 143 u8 reg;
144 144
145 switch (cmd) { 145 switch (cmd) {
146 case RTC_AIE_OFF:
147 case RTC_AIE_ON:
148 case RTC_UIE_OFF: 146 case RTC_UIE_OFF:
149 case RTC_UIE_ON: 147 case RTC_UIE_ON:
150 break; 148 break;
@@ -156,13 +154,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
156 rtc_wait_not_busy(); 154 rtc_wait_not_busy();
157 reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); 155 reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
158 switch (cmd) { 156 switch (cmd) {
159 /* AIE = Alarm Interrupt Enable */
160 case RTC_AIE_OFF:
161 reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
162 break;
163 case RTC_AIE_ON:
164 reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
165 break;
166 /* UIE = Update Interrupt Enable (1/second) */ 157 /* UIE = Update Interrupt Enable (1/second) */
167 case RTC_UIE_OFF: 158 case RTC_UIE_OFF:
168 reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER; 159 reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER;
@@ -182,6 +173,24 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
182#define omap_rtc_ioctl NULL 173#define omap_rtc_ioctl NULL
183#endif 174#endif
184 175
176static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
177{
178 u8 reg;
179
180 local_irq_disable();
181 rtc_wait_not_busy();
182 reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
183 if (enabled)
184 reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
185 else
186 reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
187 rtc_wait_not_busy();
188 rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
189 local_irq_enable();
190
191 return 0;
192}
193
185/* this hardware doesn't support "don't care" alarm fields */ 194/* this hardware doesn't support "don't care" alarm fields */
186static int tm2bcd(struct rtc_time *tm) 195static int tm2bcd(struct rtc_time *tm)
187{ 196{
@@ -309,6 +318,7 @@ static struct rtc_class_ops omap_rtc_ops = {
309 .set_time = omap_rtc_set_time, 318 .set_time = omap_rtc_set_time,
310 .read_alarm = omap_rtc_read_alarm, 319 .read_alarm = omap_rtc_read_alarm,
311 .set_alarm = omap_rtc_set_alarm, 320 .set_alarm = omap_rtc_set_alarm,
321 .alarm_irq_enable = omap_rtc_alarm_irq_enable,
312}; 322};
313 323
314static int omap_rtc_alarm; 324static int omap_rtc_alarm;
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
index c086fc30a84c..242bbf86c74a 100644
--- a/drivers/rtc/rtc-proc.c
+++ b/drivers/rtc/rtc-proc.c
@@ -81,12 +81,16 @@ static int rtc_proc_show(struct seq_file *seq, void *offset)
81 81
82static int rtc_proc_open(struct inode *inode, struct file *file) 82static int rtc_proc_open(struct inode *inode, struct file *file)
83{ 83{
84 int ret;
84 struct rtc_device *rtc = PDE(inode)->data; 85 struct rtc_device *rtc = PDE(inode)->data;
85 86
86 if (!try_module_get(THIS_MODULE)) 87 if (!try_module_get(THIS_MODULE))
87 return -ENODEV; 88 return -ENODEV;
88 89
89 return single_open(file, rtc_proc_show, rtc); 90 ret = single_open(file, rtc_proc_show, rtc);
91 if (ret)
92 module_put(THIS_MODULE);
93 return ret;
90} 94}
91 95
92static int rtc_proc_release(struct inode *inode, struct file *file) 96static int rtc_proc_release(struct inode *inode, struct file *file)
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index 36eb66184461..694da39b6dd2 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -76,7 +76,7 @@ static inline unsigned int rp5c01_read(struct rp5c01_priv *priv,
76static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val, 76static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val,
77 unsigned int reg) 77 unsigned int reg)
78{ 78{
79 return __raw_writel(val, &priv->regs[reg]); 79 __raw_writel(val, &priv->regs[reg]);
80} 80}
81 81
82static void rp5c01_lock(struct rp5c01_priv *priv) 82static void rp5c01_lock(struct rp5c01_priv *priv)
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index dd14e202c2c8..6aaa1550e3b1 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -299,14 +299,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
299 if (rs5c->type == rtc_rs5c372a 299 if (rs5c->type == rtc_rs5c372a
300 && (buf & RS5C372A_CTRL1_SL1)) 300 && (buf & RS5C372A_CTRL1_SL1))
301 return -ENOIOCTLCMD; 301 return -ENOIOCTLCMD;
302 case RTC_AIE_OFF:
303 case RTC_AIE_ON:
304 /* these irq management calls only make sense for chips
305 * which are wired up to an IRQ.
306 */
307 if (!rs5c->has_irq)
308 return -ENOIOCTLCMD;
309 break;
310 default: 302 default:
311 return -ENOIOCTLCMD; 303 return -ENOIOCTLCMD;
312 } 304 }
@@ -317,12 +309,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
317 309
318 addr = RS5C_ADDR(RS5C_REG_CTRL1); 310 addr = RS5C_ADDR(RS5C_REG_CTRL1);
319 switch (cmd) { 311 switch (cmd) {
320 case RTC_AIE_OFF: /* alarm off */
321 buf &= ~RS5C_CTRL1_AALE;
322 break;
323 case RTC_AIE_ON: /* alarm on */
324 buf |= RS5C_CTRL1_AALE;
325 break;
326 case RTC_UIE_OFF: /* update off */ 312 case RTC_UIE_OFF: /* update off */
327 buf &= ~RS5C_CTRL1_CT_MASK; 313 buf &= ~RS5C_CTRL1_CT_MASK;
328 break; 314 break;
@@ -347,6 +333,39 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
347#endif 333#endif
348 334
349 335
336static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
337{
338 struct i2c_client *client = to_i2c_client(dev);
339 struct rs5c372 *rs5c = i2c_get_clientdata(client);
340 unsigned char buf;
341 int status, addr;
342
343 buf = rs5c->regs[RS5C_REG_CTRL1];
344
345 if (!rs5c->has_irq)
346 return -EINVAL;
347
348 status = rs5c_get_regs(rs5c);
349 if (status < 0)
350 return status;
351
352 addr = RS5C_ADDR(RS5C_REG_CTRL1);
353 if (enabled)
354 buf |= RS5C_CTRL1_AALE;
355 else
356 buf &= ~RS5C_CTRL1_AALE;
357
358 if (i2c_smbus_write_byte_data(client, addr, buf) < 0) {
359 printk(KERN_WARNING "%s: can't update alarm\n",
360 rs5c->rtc->name);
361 status = -EIO;
362 } else
363 rs5c->regs[RS5C_REG_CTRL1] = buf;
364
365 return status;
366}
367
368
350/* NOTE: Since RTC_WKALM_{RD,SET} were originally defined for EFI, 369/* NOTE: Since RTC_WKALM_{RD,SET} were originally defined for EFI,
351 * which only exposes a polled programming interface; and since 370 * which only exposes a polled programming interface; and since
352 * these calls map directly to those EFI requests; we don't demand 371 * these calls map directly to those EFI requests; we don't demand
@@ -466,6 +485,7 @@ static const struct rtc_class_ops rs5c372_rtc_ops = {
466 .set_time = rs5c372_rtc_set_time, 485 .set_time = rs5c372_rtc_set_time,
467 .read_alarm = rs5c_read_alarm, 486 .read_alarm = rs5c_read_alarm,
468 .set_alarm = rs5c_set_alarm, 487 .set_alarm = rs5c_set_alarm,
488 .alarm_irq_enable = rs5c_rtc_alarm_irq_enable,
469}; 489};
470 490
471#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) 491#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 88ea52b8647a..5dfe5ffcb0d3 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -314,16 +314,6 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
314 unsigned long arg) 314 unsigned long arg)
315{ 315{
316 switch (cmd) { 316 switch (cmd) {
317 case RTC_AIE_OFF:
318 spin_lock_irq(&sa1100_rtc_lock);
319 RTSR &= ~RTSR_ALE;
320 spin_unlock_irq(&sa1100_rtc_lock);
321 return 0;
322 case RTC_AIE_ON:
323 spin_lock_irq(&sa1100_rtc_lock);
324 RTSR |= RTSR_ALE;
325 spin_unlock_irq(&sa1100_rtc_lock);
326 return 0;
327 case RTC_UIE_OFF: 317 case RTC_UIE_OFF:
328 spin_lock_irq(&sa1100_rtc_lock); 318 spin_lock_irq(&sa1100_rtc_lock);
329 RTSR &= ~RTSR_HZE; 319 RTSR &= ~RTSR_HZE;
@@ -338,6 +328,17 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
338 return -ENOIOCTLCMD; 328 return -ENOIOCTLCMD;
339} 329}
340 330
331static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
332{
333 spin_lock_irq(&sa1100_rtc_lock);
334 if (enabled)
335 RTSR |= RTSR_ALE;
336 else
337 RTSR &= ~RTSR_ALE;
338 spin_unlock_irq(&sa1100_rtc_lock);
339 return 0;
340}
341
341static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm) 342static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
342{ 343{
343 rtc_time_to_tm(RCNR, tm); 344 rtc_time_to_tm(RCNR, tm);
@@ -410,6 +411,7 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
410 .proc = sa1100_rtc_proc, 411 .proc = sa1100_rtc_proc,
411 .irq_set_freq = sa1100_irq_set_freq, 412 .irq_set_freq = sa1100_irq_set_freq,
412 .irq_set_state = sa1100_irq_set_state, 413 .irq_set_state = sa1100_irq_set_state,
414 .alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
413}; 415};
414 416
415static int sa1100_rtc_probe(struct platform_device *pdev) 417static int sa1100_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 06e41ed93230..93314a9e7fa9 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -350,10 +350,6 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
350 unsigned int ret = 0; 350 unsigned int ret = 0;
351 351
352 switch (cmd) { 352 switch (cmd) {
353 case RTC_AIE_OFF:
354 case RTC_AIE_ON:
355 sh_rtc_setaie(dev, cmd == RTC_AIE_ON);
356 break;
357 case RTC_UIE_OFF: 353 case RTC_UIE_OFF:
358 rtc->periodic_freq &= ~PF_OXS; 354 rtc->periodic_freq &= ~PF_OXS;
359 sh_rtc_setcie(dev, 0); 355 sh_rtc_setcie(dev, 0);
@@ -369,6 +365,12 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
369 return ret; 365 return ret;
370} 366}
371 367
368static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
369{
370 sh_rtc_setaie(dev, enabled);
371 return 0;
372}
373
372static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm) 374static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
373{ 375{
374 struct platform_device *pdev = to_platform_device(dev); 376 struct platform_device *pdev = to_platform_device(dev);
@@ -604,6 +606,7 @@ static struct rtc_class_ops sh_rtc_ops = {
604 .irq_set_state = sh_rtc_irq_set_state, 606 .irq_set_state = sh_rtc_irq_set_state,
605 .irq_set_freq = sh_rtc_irq_set_freq, 607 .irq_set_freq = sh_rtc_irq_set_freq,
606 .proc = sh_rtc_proc, 608 .proc = sh_rtc_proc,
609 .alarm_irq_enable = sh_rtc_alarm_irq_enable,
607}; 610};
608 611
609static int __init sh_rtc_probe(struct platform_device *pdev) 612static int __init sh_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 51725f7755b0..a82d6fe97076 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -50,24 +50,9 @@ static int test_rtc_proc(struct device *dev, struct seq_file *seq)
50 return 0; 50 return 0;
51} 51}
52 52
53static int test_rtc_ioctl(struct device *dev, unsigned int cmd, 53static int test_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
54 unsigned long arg)
55{ 54{
56 /* We do support interrupts, they're generated 55 return 0;
57 * using the sysfs interface.
58 */
59 switch (cmd) {
60 case RTC_PIE_ON:
61 case RTC_PIE_OFF:
62 case RTC_UIE_ON:
63 case RTC_UIE_OFF:
64 case RTC_AIE_ON:
65 case RTC_AIE_OFF:
66 return 0;
67
68 default:
69 return -ENOIOCTLCMD;
70 }
71} 56}
72 57
73static const struct rtc_class_ops test_rtc_ops = { 58static const struct rtc_class_ops test_rtc_ops = {
@@ -76,7 +61,7 @@ static const struct rtc_class_ops test_rtc_ops = {
76 .read_alarm = test_rtc_read_alarm, 61 .read_alarm = test_rtc_read_alarm,
77 .set_alarm = test_rtc_set_alarm, 62 .set_alarm = test_rtc_set_alarm,
78 .set_mmss = test_rtc_set_mmss, 63 .set_mmss = test_rtc_set_mmss,
79 .ioctl = test_rtc_ioctl, 64 .alarm_irq_enable = test_rtc_alarm_irq_enable,
80}; 65};
81 66
82static ssize_t test_irq_show(struct device *dev, 67static ssize_t test_irq_show(struct device *dev,
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index c3244244e8cf..769190ac6d11 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -240,26 +240,6 @@ static int vr41xx_rtc_irq_set_state(struct device *dev, int enabled)
240static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 240static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
241{ 241{
242 switch (cmd) { 242 switch (cmd) {
243 case RTC_AIE_ON:
244 spin_lock_irq(&rtc_lock);
245
246 if (!alarm_enabled) {
247 enable_irq(aie_irq);
248 alarm_enabled = 1;
249 }
250
251 spin_unlock_irq(&rtc_lock);
252 break;
253 case RTC_AIE_OFF:
254 spin_lock_irq(&rtc_lock);
255
256 if (alarm_enabled) {
257 disable_irq(aie_irq);
258 alarm_enabled = 0;
259 }
260
261 spin_unlock_irq(&rtc_lock);
262 break;
263 case RTC_EPOCH_READ: 243 case RTC_EPOCH_READ:
264 return put_user(epoch, (unsigned long __user *)arg); 244 return put_user(epoch, (unsigned long __user *)arg);
265 case RTC_EPOCH_SET: 245 case RTC_EPOCH_SET:
@@ -275,6 +255,24 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long
275 return 0; 255 return 0;
276} 256}
277 257
258static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
259{
260 spin_lock_irq(&rtc_lock);
261 if (enabled) {
262 if (!alarm_enabled) {
263 enable_irq(aie_irq);
264 alarm_enabled = 1;
265 }
266 } else {
267 if (alarm_enabled) {
268 disable_irq(aie_irq);
269 alarm_enabled = 0;
270 }
271 }
272 spin_unlock_irq(&rtc_lock);
273 return 0;
274}
275
278static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id) 276static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id)
279{ 277{
280 struct platform_device *pdev = (struct platform_device *)dev_id; 278 struct platform_device *pdev = (struct platform_device *)dev_id;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 4155805dcdff..2b771f18d1ad 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -319,6 +319,9 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
319 319
320 private = (struct dasd_eckd_private *) device->private; 320 private = (struct dasd_eckd_private *) device->private;
321 lcu = private->lcu; 321 lcu = private->lcu;
322 /* nothing to do if already disconnected */
323 if (!lcu)
324 return;
322 device->discipline->get_uid(device, &uid); 325 device->discipline->get_uid(device, &uid);
323 spin_lock_irqsave(&lcu->lock, flags); 326 spin_lock_irqsave(&lcu->lock, flags);
324 list_del_init(&device->alias_list); 327 list_del_init(&device->alias_list);
@@ -680,6 +683,9 @@ int dasd_alias_remove_device(struct dasd_device *device)
680 683
681 private = (struct dasd_eckd_private *) device->private; 684 private = (struct dasd_eckd_private *) device->private;
682 lcu = private->lcu; 685 lcu = private->lcu;
686 /* nothing to do if already removed */
687 if (!lcu)
688 return 0;
683 spin_lock_irqsave(&lcu->lock, flags); 689 spin_lock_irqsave(&lcu->lock, flags);
684 _remove_device_from_lcu(lcu, device); 690 _remove_device_from_lcu(lcu, device);
685 spin_unlock_irqrestore(&lcu->lock, flags); 691 spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 318672d05563..a9fe23d5bd0f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline;
 static struct ccw_device_id dasd_eckd_ids[] = {
 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
-	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
+	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
 	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e9fff2b9bce2..5640c89cd9de 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -476,7 +476,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
 static int get_inbound_buffer_frontier(struct qdio_q *q)
 {
 	int count, stop;
-	unsigned char state;
+	unsigned char state = 0;
 
 	/*
 	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -643,7 +643,7 @@ void qdio_inbound_processing(unsigned long data)
 static int get_outbound_buffer_frontier(struct qdio_q *q)
 {
 	int count, stop;
-	unsigned char state;
+	unsigned char state = 0;
 
 	if (need_siga_sync(q))
 		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 65ebee0a3266..b6a6356d09b3 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -565,7 +565,7 @@ static int netiucv_callback_connreq(struct iucv_path *path,
 	struct iucv_event ev;
 	int rc;
 
-	if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
+	if (memcmp(iucvMagic, ipuser, 16))
 		/* ipuser must match iucvMagic. */
 		return -EINVAL;
 	rc = -EINVAL;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 29f848bfc12f..019ae58ab913 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -988,16 +988,30 @@ static void qeth_get_channel_path_desc(struct qeth_card *card)
 	chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
 	if (chp_dsc != NULL) {
 		/* CHPP field bit 6 == 1 -> single queue */
-		if ((chp_dsc->chpp & 0x02) == 0x02)
+		if ((chp_dsc->chpp & 0x02) == 0x02) {
+			if ((atomic_read(&card->qdio.state) !=
+			    QETH_QDIO_UNINITIALIZED) &&
+			    (card->qdio.no_out_queues == 4))
+				/* change from 4 to 1 outbound queues */
+				qeth_free_qdio_buffers(card);
 			card->qdio.no_out_queues = 1;
+			if (card->qdio.default_out_queue != 0)
+				dev_info(&card->gdev->dev,
+					"Priority Queueing not supported\n");
+			card->qdio.default_out_queue = 0;
+		} else {
+			if ((atomic_read(&card->qdio.state) !=
+			    QETH_QDIO_UNINITIALIZED) &&
+			    (card->qdio.no_out_queues == 1)) {
+				/* change from 1 to 4 outbound queues */
+				qeth_free_qdio_buffers(card);
+				card->qdio.default_out_queue = 2;
+			}
+			card->qdio.no_out_queues = 4;
+		}
 		card->info.func_level = 0x4100 + chp_dsc->desc;
 		kfree(chp_dsc);
 	}
-	if (card->qdio.no_out_queues == 1) {
-		card->qdio.default_out_queue = 0;
-		dev_info(&card->gdev->dev,
-			"Priority Queueing not supported\n");
-	}
 	QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
 	QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
 	return;
@@ -1832,33 +1846,6 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
 	}
 }
 
-static inline int qeth_get_max_mtu_for_card(int cardtype)
-{
-	switch (cardtype) {
-
-	case QETH_CARD_TYPE_UNKNOWN:
-	case QETH_CARD_TYPE_OSD:
-	case QETH_CARD_TYPE_OSN:
-	case QETH_CARD_TYPE_OSM:
-	case QETH_CARD_TYPE_OSX:
-		return 61440;
-	case QETH_CARD_TYPE_IQD:
-		return 57344;
-	default:
-		return 1500;
-	}
-}
-
-static inline int qeth_get_mtu_out_of_mpc(int cardtype)
-{
-	switch (cardtype) {
-	case QETH_CARD_TYPE_IQD:
-		return 1;
-	default:
-		return 0;
-	}
-}
-
 static inline int qeth_get_mtu_outof_framesize(int framesize)
 {
 	switch (framesize) {
@@ -1881,10 +1868,9 @@ static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
 	case QETH_CARD_TYPE_OSD:
 	case QETH_CARD_TYPE_OSM:
 	case QETH_CARD_TYPE_OSX:
-		return ((mtu >= 576) && (mtu <= 61440));
 	case QETH_CARD_TYPE_IQD:
 		return ((mtu >= 576) &&
-			(mtu <= card->info.max_mtu + 4096 - 32));
+			(mtu <= card->info.max_mtu));
 	case QETH_CARD_TYPE_OSN:
 	case QETH_CARD_TYPE_UNKNOWN:
 	default:
@@ -1907,7 +1893,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
 	memcpy(&card->token.ulp_filter_r,
 	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
 	       QETH_MPC_TOKEN_LENGTH);
-	if (qeth_get_mtu_out_of_mpc(card->info.type)) {
+	if (card->info.type == QETH_CARD_TYPE_IQD) {
 		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
 		mtu = qeth_get_mtu_outof_framesize(framesize);
 		if (!mtu) {
@@ -1915,12 +1901,21 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
 			QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
 			return 0;
 		}
-		card->info.max_mtu = mtu;
+		if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
+			/* frame size has changed */
+			if (card->dev &&
+			    ((card->dev->mtu == card->info.initial_mtu) ||
+			     (card->dev->mtu > mtu)))
+				card->dev->mtu = mtu;
+			qeth_free_qdio_buffers(card);
+		}
 		card->info.initial_mtu = mtu;
+		card->info.max_mtu = mtu;
 		card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
 	} else {
 		card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
-		card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
+		card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
+			iob->data);
 		card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
 	}
 
@@ -3775,6 +3770,47 @@ static inline int qeth_get_qdio_q_format(struct qeth_card *card)
 	}
 }
 
+static void qeth_determine_capabilities(struct qeth_card *card)
+{
+	int rc;
+	int length;
+	char *prcd;
+	struct ccw_device *ddev;
+	int ddev_offline = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "detcapab");
+	ddev = CARD_DDEV(card);
+	if (!ddev->online) {
+		ddev_offline = 1;
+		rc = ccw_device_set_online(ddev);
+		if (rc) {
+			QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+			goto out;
+		}
+	}
+
+	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
+			dev_name(&card->gdev->dev), rc);
+		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+		goto out_offline;
+	}
+	qeth_configure_unitaddr(card, prcd);
+	qeth_configure_blkt_default(card, prcd);
+	kfree(prcd);
+
+	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+
+out_offline:
+	if (ddev_offline == 1)
+		ccw_device_set_offline(ddev);
+out:
+	return;
+}
+
 static int qeth_qdio_establish(struct qeth_card *card)
 {
 	struct qdio_initialize init_data;
@@ -3905,6 +3941,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
 
 	QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
 	atomic_set(&card->force_alloc_skb, 0);
+	qeth_get_channel_path_desc(card);
 retry:
 	if (retries)
 		QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
@@ -3933,6 +3970,7 @@ retriable:
 	else
 		goto retry;
 	}
+	qeth_determine_capabilities(card);
 	qeth_init_tokens(card);
 	qeth_init_func_level(card);
 	rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
@@ -4202,41 +4240,6 @@ void qeth_core_free_discipline(struct qeth_card *card)
 	card->discipline.ccwgdriver = NULL;
 }
 
-static void qeth_determine_capabilities(struct qeth_card *card)
-{
-	int rc;
-	int length;
-	char *prcd;
-
-	QETH_DBF_TEXT(SETUP, 2, "detcapab");
-	rc = ccw_device_set_online(CARD_DDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
-		goto out;
-	}
-
-
-	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
-	if (rc) {
-		QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
-			dev_name(&card->gdev->dev), rc);
-		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
-		goto out_offline;
-	}
-	qeth_configure_unitaddr(card, prcd);
-	qeth_configure_blkt_default(card, prcd);
-	kfree(prcd);
-
-	rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
-	if (rc)
-		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
-
-out_offline:
-	ccw_device_set_offline(CARD_DDEV(card));
-out:
-	return;
-}
-
 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2ac8f6aff5a4..ada0fe782373 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -573,13 +573,13 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
 	case IPA_RC_L2_DUP_LAYER3_MAC:
 		dev_warn(&card->gdev->dev,
 			"MAC address %pM already exists\n",
-			card->dev->dev_addr);
+			cmd->data.setdelmac.mac);
 		break;
 	case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
 	case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
 		dev_warn(&card->gdev->dev,
 			"MAC address %pM is not authorized\n",
-			card->dev->dev_addr);
+			cmd->data.setdelmac.mac);
 		break;
 	default:
 		break;
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 65e1cf104943..207b7d742443 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -60,7 +60,7 @@ static struct iucv_handler smsg_handler = {
 static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
 			     u8 ipuser[16])
 {
-	if (strncmp(ipvmid, "*MSG ", sizeof(ipvmid)) != 0)
+	if (strncmp(ipvmid, "*MSG ", 8) != 0)
 		return -EINVAL;
 	/* Path pending from *MSG. */
 	return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL);
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 475c31ae985c..77b26f5b9c33 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -2,7 +2,7 @@
 *******************************************************************************
 ** O.S : Linux
 ** FILE NAME : arcmsr.h
-** BY : Erich Chen
+** BY : Nick Cheng
 ** Description: SCSI RAID Device Driver for
 ** ARECA RAID Host adapter
 *******************************************************************************
@@ -46,8 +46,12 @@
 struct device_attribute;
 /*The limit of outstanding scsi command that firmware can handle*/
 #define ARCMSR_MAX_OUTSTANDING_CMD 256
-#define ARCMSR_MAX_FREECCB_NUM 320
-#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/02/02"
+#ifdef CONFIG_XEN
+	#define ARCMSR_MAX_FREECCB_NUM 160
+#else
+	#define ARCMSR_MAX_FREECCB_NUM 320
+#endif
+#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/08/05"
 #define ARCMSR_SCSI_INITIATOR_ID 255
 #define ARCMSR_MAX_XFER_SECTORS 512
 #define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -60,7 +64,6 @@ struct device_attribute;
 #define ARCMSR_MAX_HBB_POSTQUEUE 264
 #define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
 #define ARCMSR_CDB_SG_PAGE_LENGTH 256
-#define SCSI_CMD_ARECA_SPECIFIC 0xE1
 #ifndef PCI_DEVICE_ID_ARECA_1880
 #define PCI_DEVICE_ID_ARECA_1880 0x1880
  #endif
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index a4e04c50c436..acdae33de521 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -2,7 +2,7 @@
 *******************************************************************************
 ** O.S : Linux
 ** FILE NAME : arcmsr_attr.c
-** BY : Erich Chen
+** BY : Nick Cheng
 ** Description: attributes exported to sysfs and device host
 *******************************************************************************
 ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 1cadcd6b7da6..984bd527c6c9 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2,7 +2,7 @@
 *******************************************************************************
 ** O.S : Linux
 ** FILE NAME : arcmsr_hba.c
-** BY : Erich Chen
+** BY : Nick Cheng
 ** Description: SCSI RAID Device Driver for
 ** ARECA RAID Host adapter
 *******************************************************************************
@@ -76,7 +76,7 @@ MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapte
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
 static int sleeptime = 10;
-static int retrycount = 30;
+static int retrycount = 12;
 wait_queue_head_t wait_q;
 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 					struct scsi_cmnd *cmd);
@@ -187,7 +187,6 @@ int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
 	if (isleep > 0) {
 		msleep(isleep*1000);
 	}
-	printk(KERN_NOTICE "wake-up\n");
 	return 0;
 }
 
@@ -921,7 +920,6 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
 }
 
 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
-
 {
 	int id, lun;
 	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
@@ -948,7 +946,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma
 		, pCCB->startdone
 		, atomic_read(&acb->ccboutstandingcount));
 		return;
 	}
 	arcmsr_report_ccb_state(acb, pCCB, error);
 }
 
@@ -981,7 +979,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
 	case ACB_ADAPTER_TYPE_B: {
 		struct MessageUnit_B *reg = acb->pmuB;
 		/*clear all outbound posted Q*/
-		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, &reg->iop2drv_doorbell); /* clear doorbell interrupt */
+		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
 		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
 			if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
 				writel(0, &reg->done_qbuffer[i]);
@@ -1511,7 +1509,6 @@ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
 		arcmsr_drain_donequeue(acb, pCCB, error);
 	}
 }
-
 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
 {
 	uint32_t index;
@@ -2106,10 +2103,6 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
 	if (atomic_read(&acb->ccboutstandingcount) >=
 		ARCMSR_MAX_OUTSTANDING_CMD)
 		return SCSI_MLQUEUE_HOST_BUSY;
-	if ((scsicmd == SCSI_CMD_ARECA_SPECIFIC)) {
-		printk(KERN_NOTICE "Receiveing SCSI_CMD_ARECA_SPECIFIC command..\n");
-		return 0;
-	}
 	ccb = arcmsr_get_freeccb(acb);
 	if (!ccb)
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -2393,6 +2386,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
 	int index, rtn;
 	bool error;
 	polling_hbb_ccb_retry:
+
 	poll_count++;
 	/* clear doorbell interrupt */
 	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
@@ -2663,6 +2657,7 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 		return;
 	} else {
 		acb->fw_flag = FW_NORMAL;
@@ -2670,8 +2665,10 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
 			atomic_set(&acb->rq_map_token, 16);
 		}
 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-		if (atomic_dec_and_test(&acb->rq_map_token))
+		if (atomic_dec_and_test(&acb->rq_map_token)) {
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			return;
+		}
 		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 	}
@@ -2682,15 +2679,18 @@ static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B __iomem *reg = acb->pmuB;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 		return;
 	} else {
 		acb->fw_flag = FW_NORMAL;
 		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
-			atomic_set(&acb->rq_map_token,16);
+			atomic_set(&acb->rq_map_token, 16);
 		}
 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-		if(atomic_dec_and_test(&acb->rq_map_token))
+		if (atomic_dec_and_test(&acb->rq_map_token)) {
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			return;
+		}
 		writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 	}
@@ -2701,6 +2701,7 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_C __iomem *reg = acb->pmuC;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 		return;
 	} else {
 		acb->fw_flag = FW_NORMAL;
@@ -2708,8 +2709,10 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
 			atomic_set(&acb->rq_map_token, 16);
 		}
 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-		if (atomic_dec_and_test(&acb->rq_map_token))
+		if (atomic_dec_and_test(&acb->rq_map_token)) {
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			return;
+		}
 		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
 		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
@@ -2897,6 +2900,8 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
 	uint32_t intmask_org;
 	uint8_t rtnval = 0x00;
 	int i = 0;
+	unsigned long flags;
+
 	if (atomic_read(&acb->ccboutstandingcount) != 0) {
 		/* disable all outbound interrupt */
 		intmask_org = arcmsr_disable_outbound_ints(acb);
@@ -2907,7 +2912,12 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
 			ccb = acb->pccb_pool[i];
 			if (ccb->startdone == ARCMSR_CCB_START) {
-				arcmsr_ccb_complete(ccb);
+				scsi_dma_unmap(ccb->pcmd);
+				ccb->startdone = ARCMSR_CCB_DONE;
+				ccb->ccb_flags = 0;
+				spin_lock_irqsave(&acb->ccblist_lock, flags);
+				list_add_tail(&ccb->list, &acb->ccb_free_list);
+				spin_unlock_irqrestore(&acb->ccblist_lock, flags);
 			}
 		}
 		atomic_set(&acb->ccboutstandingcount, 0);
@@ -2920,8 +2930,7 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
 
 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
 {
-	struct AdapterControlBlock *acb =
-		(struct AdapterControlBlock *)cmd->device->host->hostdata;
+	struct AdapterControlBlock *acb;
 	uint32_t intmask_org, outbound_doorbell;
 	int retry_count = 0;
 	int rtn = FAILED;
@@ -2971,31 +2980,16 @@ sleep_again:
 			atomic_set(&acb->rq_map_token, 16);
 			atomic_set(&acb->ante_token_value, 16);
 			acb->fw_flag = FW_NORMAL;
-			init_timer(&acb->eternal_timer);
-			acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-			acb->eternal_timer.data = (unsigned long) acb;
-			acb->eternal_timer.function = &arcmsr_request_device_map;
-			add_timer(&acb->eternal_timer);
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			acb->acb_flags &= ~ACB_F_BUS_RESET;
 			rtn = SUCCESS;
 			printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
 		} else {
 			acb->acb_flags &= ~ACB_F_BUS_RESET;
-			if (atomic_read(&acb->rq_map_token) == 0) {
-				atomic_set(&acb->rq_map_token, 16);
-				atomic_set(&acb->ante_token_value, 16);
-				acb->fw_flag = FW_NORMAL;
-				init_timer(&acb->eternal_timer);
-				acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-				acb->eternal_timer.data = (unsigned long) acb;
-				acb->eternal_timer.function = &arcmsr_request_device_map;
-				add_timer(&acb->eternal_timer);
-			} else {
-				atomic_set(&acb->rq_map_token, 16);
-				atomic_set(&acb->ante_token_value, 16);
-				acb->fw_flag = FW_NORMAL;
-				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-			}
+			atomic_set(&acb->rq_map_token, 16);
+			atomic_set(&acb->ante_token_value, 16);
+			acb->fw_flag = FW_NORMAL;
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
 			rtn = SUCCESS;
 		}
 		break;
@@ -3007,21 +3001,10 @@ sleep_again:
 			rtn = FAILED;
 		} else {
 			acb->acb_flags &= ~ACB_F_BUS_RESET;
-			if (atomic_read(&acb->rq_map_token) == 0) {
-				atomic_set(&acb->rq_map_token, 16);
-				atomic_set(&acb->ante_token_value, 16);
-				acb->fw_flag = FW_NORMAL;
-				init_timer(&acb->eternal_timer);
-				acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-				acb->eternal_timer.data = (unsigned long) acb;
-				acb->eternal_timer.function = &arcmsr_request_device_map;
-				add_timer(&acb->eternal_timer);
-			} else {
-				atomic_set(&acb->rq_map_token, 16);
-				atomic_set(&acb->ante_token_value, 16);
-				acb->fw_flag = FW_NORMAL;
-				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-			}
+			atomic_set(&acb->rq_map_token, 16);
+			atomic_set(&acb->ante_token_value, 16);
+			acb->fw_flag = FW_NORMAL;
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			rtn = SUCCESS;
 		}
 		break;
@@ -3067,31 +3050,16 @@ sleep:
 			atomic_set(&acb->rq_map_token, 16);
 			atomic_set(&acb->ante_token_value, 16);
 			acb->fw_flag = FW_NORMAL;
-			init_timer(&acb->eternal_timer);
-			acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
-			acb->eternal_timer.data = (unsigned long) acb;
-			acb->eternal_timer.function = &arcmsr_request_device_map;
-			add_timer(&acb->eternal_timer);
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			acb->acb_flags &= ~ACB_F_BUS_RESET;
 			rtn = SUCCESS;
 			printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
 		} else {
 			acb->acb_flags &= ~ACB_F_BUS_RESET;
-			if (atomic_read(&acb->rq_map_token) == 0) {
-				atomic_set(&acb->rq_map_token, 16);
-				atomic_set(&acb->ante_token_value, 16);
-				acb->fw_flag = FW_NORMAL;
-				init_timer(&acb->eternal_timer);
-				acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-				acb->eternal_timer.data = (unsigned long) acb;
-				acb->eternal_timer.function = &arcmsr_request_device_map;
-				add_timer(&acb->eternal_timer);
-			} else {
-				atomic_set(&acb->rq_map_token, 16);
-				atomic_set(&acb->ante_token_value, 16);
-				acb->fw_flag = FW_NORMAL;
-				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-			}
+			atomic_set(&acb->rq_map_token, 16);
+			atomic_set(&acb->ante_token_value, 16);
+			acb->fw_flag = FW_NORMAL;
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
 			rtn = SUCCESS;
 		}
 		break;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 5815cbeb27a6..9a7aaf5f1311 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -646,6 +646,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
+	shost->host_eh_scheduled = 0;
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
 	SAS_DPRINTK("Enter %s\n", __func__);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index b2a817055b8b..9ead0399808a 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -2176,9 +2176,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 		/* adjust hba_queue_depth, reply_free_queue_depth,
 		 * and queue_size
 		 */
-		ioc->hba_queue_depth -= queue_diff;
-		ioc->reply_free_queue_depth -= queue_diff;
-		queue_size -= queue_diff;
+		ioc->hba_queue_depth -= (queue_diff / 2);
+		ioc->reply_free_queue_depth -= (queue_diff / 2);
+		queue_size = facts->MaxReplyDescriptorPostQueueDepth;
 	}
 	ioc->reply_post_queue_depth = queue_size;
 
@@ -3941,6 +3941,8 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
 static void
 _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
 {
+	mpt2sas_scsih_reset_handler(ioc, reset_phase);
+	mpt2sas_ctl_reset_handler(ioc, reset_phase);
 	switch (reset_phase) {
 	case MPT2_IOC_PRE_RESET:
 		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
@@ -3971,8 +3973,6 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
 		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
 		break;
 	}
-	mpt2sas_scsih_reset_handler(ioc, reset_phase);
-	mpt2sas_ctl_reset_handler(ioc, reset_phase);
 }
 
 /**
@@ -4026,6 +4026,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
 {
 	int r;
 	unsigned long flags;
+	u8 pe_complete = ioc->wait_for_port_enable_to_complete;
 
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
@@ -4068,6 +4069,14 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
 	if (r)
 		goto out;
 	_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
+
+	/* If this hard reset is called while port enable is active, then
+	 * there is no reason to call make_ioc_operational
+	 */
+	if (pe_complete) {
+		r = -EFAULT;
+		goto out;
+	}
 	r = _base_make_ioc_operational(ioc, sleep_flag);
 	if (!r)
 		_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index eda347c57979..5ded3db6e316 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -819,7 +819,7 @@ _scsih_is_end_device(u32 device_info)
 }
 
 /**
- * mptscsih_get_scsi_lookup - returns scmd entry
+ * _scsih_scsi_lookup_get - returns scmd entry
  * @ioc: per adapter object
  * @smid: system request message index
  *
@@ -832,6 +832,28 @@ _scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
 }
 
 /**
+ * _scsih_scsi_lookup_get_clear - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ * Then will derefrence the stored scmd pointer.
+ */
+static inline struct scsi_cmnd *
+_scsih_scsi_lookup_get_clear(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+	unsigned long flags;
+	struct scsi_cmnd *scmd;
+
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	scmd = ioc->scsi_lookup[smid - 1].scmd;
+	ioc->scsi_lookup[smid - 1].scmd = NULL;
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+	return scmd;
+}
+
+/**
  * _scsih_scsi_lookup_find_by_scmd - scmd lookup
  * @ioc: per adapter object
  * @smid: system request message index
@@ -2981,9 +3003,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
 	u16 handle;
 
 	for (i = 0 ; i < event_data->NumEntries; i++) {
-		if (event_data->PHY[i].PhyStatus &
-		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
-			continue;
 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
 		if (!handle)
 			continue;
@@ -3210,7 +3229,7 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
 	u16 count = 0;
 
 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
-		scmd = _scsih_scsi_lookup_get(ioc, smid);
+		scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
 		if (!scmd)
 			continue;
 		count++;
@@ -3804,7 +3823,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 	u32 response_code = 0;
 
 	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
-	scmd = _scsih_scsi_lookup_get(ioc, smid);
+	scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
 	if (scmd == NULL)
 		return 1;
 
@@ -5005,6 +5024,12 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
 	    event_data);
 #endif
 
+	/* In MPI Revision K (0xC), the internal device reset complete was
+	 * implemented, so avoid setting tm_busy flag for older firmware.
+	 */
+	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
+		return;
+
 	if (event_data->ReasonCode !=
 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
 	    event_data->ReasonCode !=
@@ -5099,6 +5124,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
 	struct fw_event_work *fw_event)
 {
 	struct scsi_cmnd *scmd;
+	struct scsi_device *sdev;
 	u16 smid, handle;
 	u32 lun;
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -5109,12 +5135,17 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
 	Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
 #endif
 	u16 ioc_status;
+	unsigned long flags;
+	int r;
+
 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primative: "
 	    "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
 	    event_data->PortWidth));
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
 
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	ioc->broadcast_aen_busy = 0;
 	termination_count = 0;
 	query_count = 0;
 	mpi_reply = ioc->tm_cmds.reply;
@@ -5122,7 +5153,8 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
 		scmd = _scsih_scsi_lookup_get(ioc, smid);
 		if (!scmd)
 			continue;
-		sas_device_priv_data = scmd->device->hostdata;
+		sdev = scmd->device;
+		sas_device_priv_data = sdev->hostdata;
 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
 			continue;
 		/* skip hidden raid components */
@@ -5138,6 +5170,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
 		lun = sas_device_priv_data->lun;
 		query_count++;
 
+		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 		mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
 		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
 		ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
@@ -5147,14 +5180,20 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
 		    (mpi_reply->ResponseCode ==
 		    MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
 		    mpi_reply->ResponseCode ==
-		    MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
+		    MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) {
+			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 			continue;
-
-		mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
-		    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL);
+		}
+		r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+		    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+		    scmd);
+		if (r == FAILED)
+			sdev_printk(KERN_WARNING, sdev, "task abort: FAILED "
+			    "scmd(%p)\n", scmd);
 		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
+		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 	}
-	ioc->broadcast_aen_busy = 0;
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
 	    "%s - exit, query_count = %d termination_count = %d\n",
@@ -6626,6 +6665,7 @@ _scsih_remove(struct pci_dev *pdev)
 		destroy_workqueue(wq);
 
 	/* release all the volumes */
+	_scsih_ir_shutdown(ioc);
 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
 	    list) {
 		if (raid_device->starget) {
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 44578b56ad0a..d3e58d763b43 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
 {
 	struct Scsi_Host *host = rport_to_shost(rport);
 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+	unsigned long flags;
 
 	if (!fcport)
 		return;
@@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
 	 * Transport has effectively 'deleted' the rport, clear
 	 * all local references.
 	 */
-	spin_lock_irq(host->host_lock);
+	spin_lock_irqsave(host->host_lock, flags);
 	fcport->rport = fcport->drport = NULL;
 	*((fc_port_t **)rport->dd_data) = NULL;
-	spin_unlock_irq(host->host_lock);
+	spin_unlock_irqrestore(host->host_lock, flags);
 
 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
 		return;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f948e1a73aec..d9479c3fe5f8 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data)
 {
 	fc_port_t *fcport = data;
 	struct fc_rport *rport;
+	unsigned long flags;
 
-	spin_lock_irq(fcport->vha->host->host_lock);
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	rport = fcport->drport ? fcport->drport: fcport->rport;
 	fcport->drport = NULL;
-	spin_unlock_irq(fcport->vha->host->host_lock);
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 	if (rport)
 		fc_remote_port_delete(rport);
 }
@@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 	struct fc_rport_identifiers rport_ids;
 	struct fc_rport *rport;
 	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
 
 	qla2x00_rport_del(fcport);
 
@@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 		    "Unable to allocate fc remote port!\n");
 		return;
 	}
-	spin_lock_irq(fcport->vha->host->host_lock);
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	*((fc_port_t **)rport->dd_data) = fcport;
-	spin_unlock_irq(fcport->vha->host->host_lock);
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 
 	rport->supported_classes = fcport->supported_classes;
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c194c23ca1fb..f27724d76cf6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
 	}
 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-			atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
 			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 			cmd->result = DID_NO_CONNECT << 16;
 			goto qc24_fail_command;
@@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
 {
 	struct fc_rport *rport;
 	scsi_qla_host_t *base_vha;
+	unsigned long flags;
 
 	if (!fcport->rport)
 		return;
@@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
 	rport = fcport->rport;
 	if (defer) {
 		base_vha = pci_get_drvdata(vha->hw->pdev);
-		spin_lock_irq(vha->host->host_lock);
+		spin_lock_irqsave(vha->host->host_lock, flags);
 		fcport->drport = rport;
-		spin_unlock_irq(vha->host->host_lock);
+		spin_unlock_irqrestore(vha->host->host_lock, flags);
 		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
 		qla2xxx_wake_dpc(base_vha);
 	} else
@@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data)
 
 	set_user_nice(current, -20);
 
+	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
 		DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
 
-		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
 		__set_current_state(TASK_RUNNING);
 
@@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data)
 		qla2x00_do_dpc_all_vps(base_vha);
 
 		ha->dpc_active = 0;
+		set_current_state(TASK_INTERRUPTIBLE);
 	} /* End of while(1) */
+	__set_current_state(TASK_RUNNING);
 
 	DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
 
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b310934efed..a6b2d72022fc 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd,
 			       unsigned long long lba, unsigned int num, int write)
 {
 	int ret;
-	unsigned int block, rest = 0;
+	unsigned long long block, rest = 0;
 	int (*func)(struct scsi_cmnd *, unsigned char *, int);
 
 	func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c
index b6589bb3a6c3..378e504f89eb 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/pxa2xx_spi_pci.c
@@ -7,10 +7,9 @@
 #include <linux/of_device.h>
 #include <linux/spi/pxa2xx_spi.h>
 
-struct awesome_struct {
+struct ce4100_info {
 	struct ssp_device ssp;
-	struct platform_device spi_pdev;
-	struct pxa2xx_spi_master spi_pdata;
+	struct platform_device *spi_pdev;
 };
 
 static DEFINE_MUTEX(ssp_lock);
@@ -51,23 +50,15 @@ void pxa_ssp_free(struct ssp_device *ssp)
 }
 EXPORT_SYMBOL_GPL(pxa_ssp_free);
 
-static void plat_dev_release(struct device *dev)
-{
-	struct awesome_struct *as = container_of(dev,
-			struct awesome_struct, spi_pdev.dev);
-
-	of_device_node_put(&as->spi_pdev.dev);
-}
-
 static int __devinit ce4100_spi_probe(struct pci_dev *dev,
 		const struct pci_device_id *ent)
 {
 	int ret;
 	resource_size_t phys_beg;
 	resource_size_t phys_len;
-	struct awesome_struct *spi_info;
+	struct ce4100_info *spi_info;
 	struct platform_device *pdev;
-	struct pxa2xx_spi_master *spi_pdata;
+	struct pxa2xx_spi_master spi_pdata;
 	struct ssp_device *ssp;
 
 	ret = pci_enable_device(dev);
@@ -84,31 +75,28 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
 		return ret;
 	}
 
+	pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
 	spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
-	if (!spi_info) {
+	if (!pdev || !spi_info ) {
 		ret = -ENOMEM;
-		goto err_kz;
+		goto err_nomem;
 	}
-	ssp = &spi_info->ssp;
-	pdev = &spi_info->spi_pdev;
-	spi_pdata = &spi_info->spi_pdata;
+	memset(&spi_pdata, 0, sizeof(spi_pdata));
+	spi_pdata.num_chipselect = dev->devfn;
 
-	pdev->name = "pxa2xx-spi";
-	pdev->id = dev->devfn;
-	pdev->dev.parent = &dev->dev;
-	pdev->dev.platform_data = &spi_info->spi_pdata;
+	ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata));
+	if (ret)
+		goto err_nomem;
 
+	pdev->dev.parent = &dev->dev;
 	pdev->dev.of_node = dev->dev.of_node;
-	pdev->dev.release = plat_dev_release;
-
-	spi_pdata->num_chipselect = dev->devfn;
-
+	ssp = &spi_info->ssp;
 	ssp->phys_base = pci_resource_start(dev, 0);
 	ssp->mmio_base = ioremap(phys_beg, phys_len);
 	if (!ssp->mmio_base) {
 		dev_err(&pdev->dev, "failed to ioremap() registers\n");
 		ret = -EIO;
-		goto err_remap;
+		goto err_nomem;
 	}
 	ssp->irq = dev->irq;
 	ssp->port_id = pdev->id;
@@ -120,7 +108,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
 
 	pci_set_drvdata(dev, spi_info);
 
-	ret = platform_device_register(pdev);
+	ret = platform_device_add(pdev);
 	if (ret)
 		goto err_dev_add;
 
@@ -133,27 +121,21 @@ err_dev_add:
 	mutex_unlock(&ssp_lock);
 	iounmap(ssp->mmio_base);
 
-err_remap:
-	kfree(spi_info);
-
-err_kz:
+err_nomem:
 	release_mem_region(phys_beg, phys_len);
-
+	platform_device_put(pdev);
+	kfree(spi_info);
 	return ret;
 }
 
 static void __devexit ce4100_spi_remove(struct pci_dev *dev)
 {
-	struct awesome_struct *spi_info;
-	struct platform_device *pdev;
+	struct ce4100_info *spi_info;
 	struct ssp_device *ssp;
 
 	spi_info = pci_get_drvdata(dev);
-
 	ssp = &spi_info->ssp;
-	pdev = &spi_info->spi_pdev;
-
-	platform_device_unregister(pdev);
+	platform_device_unregister(spi_info->spi_pdev);
 
 	iounmap(ssp->mmio_base);
 	release_mem_region(pci_resource_start(dev, 0),
@@ -169,7 +151,6 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev)
169} 151}
170 152
171static struct pci_device_id ce4100_spi_devices[] __devinitdata = { 153static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
172
173 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, 154 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
174 { }, 155 { },
175}; 156};
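
The pxa2xx_spi_pci hunk above converts the CE4100 glue from a platform_device embedded in the driver's private struct, complete with a hand-rolled release callback, to the dynamically allocated platform_device API: the driver core then owns the device lifetime and the error path reduces to platform_device_put(). A minimal sketch of that general pattern, assuming made-up names ("example-driver", struct example_pdata) that are not part of the patch:

#include <linux/platform_device.h>
#include <linux/err.h>

struct example_pdata {
	int num_chipselect;
};

static struct platform_device *example_register(struct device *parent, int id)
{
	struct example_pdata pdata = { .num_chipselect = id };
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("example-driver", id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	/* platform_device_add_data() copies pdata, so it may live on the stack */
	ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
	if (ret)
		goto err_put;

	pdev->dev.parent = parent;

	ret = platform_device_add(pdev);
	if (ret)
		goto err_put;

	return pdev;

err_put:
	/* drops the reference taken by platform_device_alloc() */
	platform_device_put(pdev);
	return ERR_PTR(ret);
}

The copying behaviour of platform_device_add_data() is also why the new probe keeps spi_pdata on the stack, and why the remove path can simply call platform_device_unregister() on the stored pointer.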
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
index 56f60c8ea0ab..2c665fceaac7 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi_sh_msiof.c
@@ -509,9 +509,11 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
509 bytes_done = 0; 509 bytes_done = 0;
510 510
511 while (bytes_done < t->len) { 511 while (bytes_done < t->len) {
512 void *rx_buf = t->rx_buf ? t->rx_buf + bytes_done : NULL;
513 const void *tx_buf = t->tx_buf ? t->tx_buf + bytes_done : NULL;
512 n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, 514 n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo,
513 t->tx_buf + bytes_done, 515 tx_buf,
514 t->rx_buf + bytes_done, 516 rx_buf,
515 words, bits); 517 words, bits);
516 if (n < 0) 518 if (n < 0)
517 break; 519 break;
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index c7345dbf43fa..f8533795ee7f 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -733,7 +733,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
733 733
734 /* Fetch the vendor specific tuples. */ 734 /* Fetch the vendor specific tuples. */
735 res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS, 735 res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS,
736 ssb_pcmcia_do_get_invariants, sprom); 736 ssb_pcmcia_do_get_invariants, iv);
737 if ((res == 0) || (res == -ENOSPC)) 737 if ((res == 0) || (res == -ENOSPC))
738 return 0; 738 return 0;
739 739
diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c
index f1235884cc5d..cd8392badff0 100644
--- a/drivers/staging/brcm80211/sys/wl_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wl_mac80211.c
@@ -263,9 +263,7 @@ ieee_set_channel(struct ieee80211_hw *hw, struct ieee80211_channel *chan,
263 switch (type) { 263 switch (type) {
264 case NL80211_CHAN_HT20: 264 case NL80211_CHAN_HT20:
265 case NL80211_CHAN_NO_HT: 265 case NL80211_CHAN_NO_HT:
266 WL_LOCK(wl);
267 err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value); 266 err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value);
268 WL_UNLOCK(wl);
269 break; 267 break;
270 case NL80211_CHAN_HT40MINUS: 268 case NL80211_CHAN_HT40MINUS:
271 case NL80211_CHAN_HT40PLUS: 269 case NL80211_CHAN_HT40PLUS:
@@ -285,6 +283,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
285 int err = 0; 283 int err = 0;
286 int new_int; 284 int new_int;
287 285
286 WL_LOCK(wl);
288 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { 287 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
289 WL_NONE("%s: Setting listen interval to %d\n", 288 WL_NONE("%s: Setting listen interval to %d\n",
290 __func__, conf->listen_interval); 289 __func__, conf->listen_interval);
@@ -341,6 +340,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
341 } 340 }
342 341
343 config_out: 342 config_out:
343 WL_UNLOCK(wl);
344 return err; 344 return err;
345} 345}
346 346
@@ -459,13 +459,21 @@ wl_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
459 459
460static void wl_ops_sw_scan_start(struct ieee80211_hw *hw) 460static void wl_ops_sw_scan_start(struct ieee80211_hw *hw)
461{ 461{
462 struct wl_info *wl = hw->priv;
462 WL_NONE("Scan Start\n"); 463 WL_NONE("Scan Start\n");
464 WL_LOCK(wl);
465 wlc_scan_start(wl->wlc);
466 WL_UNLOCK(wl);
463 return; 467 return;
464} 468}
465 469
466static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw) 470static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw)
467{ 471{
472 struct wl_info *wl = hw->priv;
468 WL_NONE("Scan Complete\n"); 473 WL_NONE("Scan Complete\n");
474 WL_LOCK(wl);
475 wlc_scan_stop(wl->wlc);
476 WL_UNLOCK(wl);
469 return; 477 return;
470} 478}
471 479
diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.c b/drivers/staging/brcm80211/sys/wlc_mac80211.c
index a1303863686c..e37e8058e2b8 100644
--- a/drivers/staging/brcm80211/sys/wlc_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wlc_mac80211.c
@@ -8461,3 +8461,16 @@ static void wlc_txq_free(struct wlc_info *wlc, struct osl_info *osh,
8461 8461
8462 kfree(qi); 8462 kfree(qi);
8463} 8463}
8464
8465/*
8466 * Flag 'scan in progress' to withhold dynamic phy calibration
8467 */
8468void wlc_scan_start(struct wlc_info *wlc)
8469{
8470 wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true);
8471}
8472
8473void wlc_scan_stop(struct wlc_info *wlc)
8474{
8475 wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false);
8476}
diff --git a/drivers/staging/brcm80211/sys/wlc_pub.h b/drivers/staging/brcm80211/sys/wlc_pub.h
index 146a6904a39b..aff413001b70 100644
--- a/drivers/staging/brcm80211/sys/wlc_pub.h
+++ b/drivers/staging/brcm80211/sys/wlc_pub.h
@@ -570,6 +570,8 @@ extern void wlc_enable_mac(struct wlc_info *wlc);
570extern u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate); 570extern u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate);
571extern u32 wlc_get_rspec_history(struct wlc_bsscfg *cfg); 571extern u32 wlc_get_rspec_history(struct wlc_bsscfg *cfg);
572extern u32 wlc_get_current_highest_rate(struct wlc_bsscfg *cfg); 572extern u32 wlc_get_current_highest_rate(struct wlc_bsscfg *cfg);
573extern void wlc_scan_start(struct wlc_info *wlc);
574extern void wlc_scan_stop(struct wlc_info *wlc);
573 575
574static inline int wlc_iovar_getuint(struct wlc_info *wlc, const char *name, 576static inline int wlc_iovar_getuint(struct wlc_info *wlc, const char *name,
575 uint *arg) 577 uint *arg)
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index aad47326d6dc..1502d80f6f78 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -439,6 +439,7 @@ config COMEDI_NI_AT_AO
439config COMEDI_NI_ATMIO 439config COMEDI_NI_ATMIO
440 tristate "NI AT-MIO E series ISA-PNP card support" 440 tristate "NI AT-MIO E series ISA-PNP card support"
441 depends on ISAPNP && COMEDI_NI_TIO && COMEDI_NI_COMMON 441 depends on ISAPNP && COMEDI_NI_TIO && COMEDI_NI_COMMON
442 select COMEDI_8255
442 default N 443 default N
443 ---help--- 444 ---help---
444 Enable support for National Instruments AT-MIO E series cards 445 Enable support for National Instruments AT-MIO E series cards
@@ -1040,6 +1041,8 @@ config COMEDI_NI_PCIDIO
1040config COMEDI_NI_PCIMIO 1041config COMEDI_NI_PCIMIO
1041 tristate "NI PCI-MIO-E series and M series support" 1042 tristate "NI PCI-MIO-E series and M series support"
1042 depends on COMEDI_NI_TIO && COMEDI_NI_COMMON 1043 depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
1044 select COMEDI_8255
1045 select COMEDI_FC
1043 default N 1046 default N
1044 ---help--- 1047 ---help---
1045 Enable support for National Instruments PCI-MIO-E series and M series 1048 Enable support for National Instruments PCI-MIO-E series and M series
@@ -1164,6 +1167,7 @@ config COMEDI_NI_LABPC_CS
1164config COMEDI_NI_MIO_CS 1167config COMEDI_NI_MIO_CS
1165 tristate "NI DAQCard E series PCMCIA support" 1168 tristate "NI DAQCard E series PCMCIA support"
1166 depends on COMEDI_NI_TIO && COMEDI_NI_COMMON 1169 depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
1170 select COMEDI_8255
1167 select COMEDI_FC 1171 select COMEDI_FC
1168 default N 1172 default N
1169 ---help--- 1173 ---help---
@@ -1268,7 +1272,6 @@ config COMEDI_MITE
1268config COMEDI_NI_TIO 1272config COMEDI_NI_TIO
1269 tristate "NI general purpose counter support" 1273 tristate "NI general purpose counter support"
1270 depends on COMEDI_MITE 1274 depends on COMEDI_MITE
1271 select COMEDI_8255
1272 default N 1275 default N
1273 ---help--- 1276 ---help---
1274 Enable support for National Instruments general purpose counters. 1277 Enable support for National Instruments general purpose counters.
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index cd25b241cc1f..fd274e9c7b78 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -61,8 +61,6 @@
61#define PCI_DAQ_SIZE 4096 61#define PCI_DAQ_SIZE 4096
62#define PCI_DAQ_SIZE_660X 8192 62#define PCI_DAQ_SIZE_660X 8192
63 63
64MODULE_LICENSE("GPL");
65
66struct mite_struct *mite_devices; 64struct mite_struct *mite_devices;
67EXPORT_SYMBOL(mite_devices); 65EXPORT_SYMBOL(mite_devices);
68 66
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 14e716e99a5c..54741c9e1af5 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -527,3 +527,7 @@ static void __exit driver_ni6527_cleanup_module(void)
527 527
528module_init(driver_ni6527_init_module); 528module_init(driver_ni6527_init_module);
529module_exit(driver_ni6527_cleanup_module); 529module_exit(driver_ni6527_cleanup_module);
530
531MODULE_AUTHOR("Comedi http://www.comedi.org");
532MODULE_DESCRIPTION("Comedi low-level driver");
533MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 8b8e2aaf77fb..403fc0997d37 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -871,3 +871,7 @@ static void __exit driver_ni_65xx_cleanup_module(void)
871 871
872module_init(driver_ni_65xx_init_module); 872module_init(driver_ni_65xx_init_module);
873module_exit(driver_ni_65xx_cleanup_module); 873module_exit(driver_ni_65xx_cleanup_module);
874
875MODULE_AUTHOR("Comedi http://www.comedi.org");
876MODULE_DESCRIPTION("Comedi low-level driver");
877MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 6612b085c4ef..ca2aeaa9449c 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -1421,3 +1421,7 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
1421 }; 1421 };
1422 return 0; 1422 return 0;
1423} 1423}
1424
1425MODULE_AUTHOR("Comedi http://www.comedi.org");
1426MODULE_DESCRIPTION("Comedi low-level driver");
1427MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index e9f034efdc6f..d8d91f90060e 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -384,3 +384,7 @@ static int ni_670x_find_device(struct comedi_device *dev, int bus, int slot)
384 mite_list_devices(); 384 mite_list_devices();
385 return -EIO; 385 return -EIO;
386} 386}
387
388MODULE_AUTHOR("Comedi http://www.comedi.org");
389MODULE_DESCRIPTION("Comedi low-level driver");
390MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 84a15c34e484..005d2fe86ee4 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -1354,3 +1354,7 @@ static void __exit driver_pcidio_cleanup_module(void)
1354 1354
1355module_init(driver_pcidio_init_module); 1355module_init(driver_pcidio_init_module);
1356module_exit(driver_pcidio_cleanup_module); 1356module_exit(driver_pcidio_cleanup_module);
1357
1358MODULE_AUTHOR("Comedi http://www.comedi.org");
1359MODULE_DESCRIPTION("Comedi low-level driver");
1360MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 23a381247285..9148abdad074 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1853,3 +1853,7 @@ static int pcimio_dio_change(struct comedi_device *dev,
1853 1853
1854 return 0; 1854 return 0;
1855} 1855}
1856
1857MODULE_AUTHOR("Comedi http://www.comedi.org");
1858MODULE_DESCRIPTION("Comedi low-level driver");
1859MODULE_LICENSE("GPL");
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 54706a16dc0a..b41c9640b72d 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -236,6 +236,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
236 if (status == 1) { 236 if (status == 1) {
237 netif_carrier_on(net); 237 netif_carrier_on(net);
238 netif_wake_queue(net); 238 netif_wake_queue(net);
239 netif_notify_peers(net);
239 } else { 240 } else {
240 netif_carrier_off(net); 241 netif_carrier_off(net);
241 netif_stop_queue(net); 242 netif_stop_queue(net);
diff --git a/drivers/staging/intel_sst/intelmid_v2_control.c b/drivers/staging/intel_sst/intelmid_v2_control.c
index e38e89df6e84..e2f6d6a3c850 100644
--- a/drivers/staging/intel_sst/intelmid_v2_control.c
+++ b/drivers/staging/intel_sst/intelmid_v2_control.c
@@ -874,7 +874,10 @@ static int nc_set_selected_input_dev(u8 value)
874 sc_access[3].reg_addr = 0x109; 874 sc_access[3].reg_addr = 0x109;
875 sc_access[3].mask = MASK6; 875 sc_access[3].mask = MASK6;
876 sc_access[3].value = 0x00; 876 sc_access[3].value = 0x00;
877 num_val = 4; 877 sc_access[4].reg_addr = 0x104;
878 sc_access[4].value = 0x3C;
879 sc_access[4].mask = 0xff;
880 num_val = 5;
878 break; 881 break;
879 default: 882 default:
880 return -EINVAL; 883 return -EINVAL;
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c
index 3fe5f4160194..0aad0d7a74a3 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/lirc/lirc_zilog.c
@@ -495,7 +495,7 @@ static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
495/* send boot data to the IR TX device */ 495/* send boot data to the IR TX device */
496static int send_boot_data(struct IR_tx *tx) 496static int send_boot_data(struct IR_tx *tx)
497{ 497{
498 int ret; 498 int ret, i;
499 unsigned char buf[4]; 499 unsigned char buf[4];
500 500
501 /* send the boot block */ 501 /* send the boot block */
@@ -503,7 +503,7 @@ static int send_boot_data(struct IR_tx *tx)
503 if (ret != 0) 503 if (ret != 0)
504 return ret; 504 return ret;
505 505
506 /* kick it off? */ 506 /* Hit the go button to activate the new boot data */
507 buf[0] = 0x00; 507 buf[0] = 0x00;
508 buf[1] = 0x20; 508 buf[1] = 0x20;
509 ret = i2c_master_send(tx->c, buf, 2); 509 ret = i2c_master_send(tx->c, buf, 2);
@@ -511,7 +511,19 @@ static int send_boot_data(struct IR_tx *tx)
511 zilog_error("i2c_master_send failed with %d\n", ret); 511 zilog_error("i2c_master_send failed with %d\n", ret);
512 return ret < 0 ? ret : -EFAULT; 512 return ret < 0 ? ret : -EFAULT;
513 } 513 }
514 ret = i2c_master_send(tx->c, buf, 1); 514
515 /*
516 * Wait for zilog to settle after hitting go post boot block upload.
517 * Without this delay, the HD-PVR and HVR-1950 both return an -EIO
518 * upon attempting to get firmware revision, and tx probe thus fails.
519 */
520 for (i = 0; i < 10; i++) {
521 ret = i2c_master_send(tx->c, buf, 1);
522 if (ret == 1)
523 break;
524 udelay(100);
525 }
526
515 if (ret != 1) { 527 if (ret != 1) {
516 zilog_error("i2c_master_send failed with %d\n", ret); 528 zilog_error("i2c_master_send failed with %d\n", ret);
517 return ret < 0 ? ret : -EFAULT; 529 return ret < 0 ? ret : -EFAULT;
@@ -523,8 +535,8 @@ static int send_boot_data(struct IR_tx *tx)
523 zilog_error("i2c_master_recv failed with %d\n", ret); 535 zilog_error("i2c_master_recv failed with %d\n", ret);
524 return 0; 536 return 0;
525 } 537 }
526 if (buf[0] != 0x80) { 538 if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
527 zilog_error("unexpected IR TX response: %02x\n", buf[0]); 539 zilog_error("unexpected IR TX init response: %02x\n", buf[0]);
528 return 0; 540 return 0;
529 } 541 }
530 zilog_notify("Zilog/Hauppauge IR blaster firmware version " 542 zilog_notify("Zilog/Hauppauge IR blaster firmware version "
@@ -827,7 +839,15 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
827 zilog_error("i2c_master_send failed with %d\n", ret); 839 zilog_error("i2c_master_send failed with %d\n", ret);
828 return ret < 0 ? ret : -EFAULT; 840 return ret < 0 ? ret : -EFAULT;
829 } 841 }
830 ret = i2c_master_send(tx->c, buf, 1); 842
843 /* Give the z8 a moment to process data block */
844 for (i = 0; i < 10; i++) {
845 ret = i2c_master_send(tx->c, buf, 1);
846 if (ret == 1)
847 break;
848 udelay(100);
849 }
850
831 if (ret != 1) { 851 if (ret != 1) {
832 zilog_error("i2c_master_send failed with %d\n", ret); 852 zilog_error("i2c_master_send failed with %d\n", ret);
833 return ret < 0 ? ret : -EFAULT; 853 return ret < 0 ? ret : -EFAULT;
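
Both lirc_zilog hunks above open-code the same idiom: retry a one-byte i2c_master_send() up to ten times, pausing 100 microseconds between attempts, so the Z8 firmware has time to settle after being kicked. A sketch of that idiom as a stand-alone helper; the function name and the -EIO fallback are illustrative, not taken from the patch:

#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_i2c_send_byte(struct i2c_client *c, u8 val, int tries)
{
	unsigned char buf[1] = { val };
	int ret = -EIO;

	while (tries--) {
		ret = i2c_master_send(c, buf, 1);
		if (ret == 1)
			return 0;	/* the single byte was accepted */
		udelay(100);		/* give the firmware time to settle */
	}
	return ret < 0 ? ret : -EIO;	/* keep a real errno if we saw one */
}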
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 5415712f01f8..4bd8cbdaee76 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -227,6 +227,7 @@ static int zram_read(struct zram *zram, struct bio *bio)
227 227
228 if (zram_test_flag(zram, index, ZRAM_ZERO)) { 228 if (zram_test_flag(zram, index, ZRAM_ZERO)) {
229 handle_zero_page(page); 229 handle_zero_page(page);
230 index++;
230 continue; 231 continue;
231 } 232 }
232 233
@@ -235,12 +236,14 @@ static int zram_read(struct zram *zram, struct bio *bio)
235 pr_debug("Read before write: sector=%lu, size=%u", 236 pr_debug("Read before write: sector=%lu, size=%u",
236 (ulong)(bio->bi_sector), bio->bi_size); 237 (ulong)(bio->bi_sector), bio->bi_size);
237 /* Do nothing */ 238 /* Do nothing */
239 index++;
238 continue; 240 continue;
239 } 241 }
240 242
241 /* Page is stored uncompressed since it's incompressible */ 243 /* Page is stored uncompressed since it's incompressible */
242 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) { 244 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
243 handle_uncompressed_page(zram, page, index); 245 handle_uncompressed_page(zram, page, index);
246 index++;
244 continue; 247 continue;
245 } 248 }
246 249
@@ -320,6 +323,7 @@ static int zram_write(struct zram *zram, struct bio *bio)
320 mutex_unlock(&zram->lock); 323 mutex_unlock(&zram->lock);
321 zram_stat_inc(&zram->stats.pages_zero); 324 zram_stat_inc(&zram->stats.pages_zero);
322 zram_set_flag(zram, index, ZRAM_ZERO); 325 zram_set_flag(zram, index, ZRAM_ZERO);
326 index++;
323 continue; 327 continue;
324 } 328 }
325 329
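
The zram hunk fixes a loop in which the page index only advanced at the bottom of the body, so each continue path had to remember its own index++. A generic sketch of the safer shape (not zram code: zram walks bio segments, so it has to keep the explicit increments), where the per-item index advances in the for-header and cannot be skipped:

#include <linux/types.h>

/* Counts entries that need real work; zero entries are skipped. */
static size_t example_count_work(const int *entries, size_t nr_entries)
{
	size_t index, work = 0;

	for (index = 0; index < nr_entries; index++) {
		if (entries[index] == 0)
			continue;	/* index still advances next pass */
		work++;
	}
	return work;
}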
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 5cfd70819f08..973bb190ef57 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -13,8 +13,7 @@ target_core_mod-y := target_core_configfs.o \
13 target_core_transport.o \ 13 target_core_transport.o \
14 target_core_cdb.o \ 14 target_core_cdb.o \
15 target_core_ua.o \ 15 target_core_ua.o \
16 target_core_rd.o \ 16 target_core_rd.o
17 target_core_mib.o
18 17
19obj-$(CONFIG_TARGET_CORE) += target_core_mod.o 18obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
20 19
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 2764510798b0..caf8dc18ee0a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -37,7 +37,6 @@
37#include <linux/parser.h> 37#include <linux/parser.h>
38#include <linux/syscalls.h> 38#include <linux/syscalls.h>
39#include <linux/configfs.h> 39#include <linux/configfs.h>
40#include <linux/proc_fs.h>
41 40
42#include <target/target_core_base.h> 41#include <target/target_core_base.h>
43#include <target/target_core_device.h> 42#include <target/target_core_device.h>
@@ -1971,13 +1970,35 @@ static void target_core_dev_release(struct config_item *item)
1971{ 1970{
1972 struct se_subsystem_dev *se_dev = container_of(to_config_group(item), 1971 struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
1973 struct se_subsystem_dev, se_dev_group); 1972 struct se_subsystem_dev, se_dev_group);
1974 struct config_group *dev_cg; 1973 struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
1975 1974 struct se_subsystem_api *t = hba->transport;
1976 if (!(se_dev)) 1975 struct config_group *dev_cg = &se_dev->se_dev_group;
1977 return;
1978 1976
1979 dev_cg = &se_dev->se_dev_group;
1980 kfree(dev_cg->default_groups); 1977 kfree(dev_cg->default_groups);
1978 /*
1979 * This pointer will be set when the storage is enabled with:
1980 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
1981 */
1982 if (se_dev->se_dev_ptr) {
1983 printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
1984 "virtual_device() for se_dev_ptr: %p\n",
1985 se_dev->se_dev_ptr);
1986
1987 se_free_virtual_device(se_dev->se_dev_ptr, hba);
1988 } else {
1989 /*
1990 * Release struct se_subsystem_dev->se_dev_su_ptr..
1991 */
1992 printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
1993 "device() for se_dev_su_ptr: %p\n",
1994 se_dev->se_dev_su_ptr);
1995
1996 t->free_device(se_dev->se_dev_su_ptr);
1997 }
1998
1999 printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
2000 "_dev_t: %p\n", se_dev);
2001 kfree(se_dev);
1981} 2002}
1982 2003
1983static ssize_t target_core_dev_show(struct config_item *item, 2004static ssize_t target_core_dev_show(struct config_item *item,
@@ -2140,7 +2161,16 @@ static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
2140 NULL, 2161 NULL,
2141}; 2162};
2142 2163
2164static void target_core_alua_lu_gp_release(struct config_item *item)
2165{
2166 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2167 struct t10_alua_lu_gp, lu_gp_group);
2168
2169 core_alua_free_lu_gp(lu_gp);
2170}
2171
2143static struct configfs_item_operations target_core_alua_lu_gp_ops = { 2172static struct configfs_item_operations target_core_alua_lu_gp_ops = {
2173 .release = target_core_alua_lu_gp_release,
2144 .show_attribute = target_core_alua_lu_gp_attr_show, 2174 .show_attribute = target_core_alua_lu_gp_attr_show,
2145 .store_attribute = target_core_alua_lu_gp_attr_store, 2175 .store_attribute = target_core_alua_lu_gp_attr_store,
2146}; 2176};
@@ -2191,9 +2221,11 @@ static void target_core_alua_drop_lu_gp(
2191 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" 2221 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
2192 " Group: core/alua/lu_gps/%s, ID: %hu\n", 2222 " Group: core/alua/lu_gps/%s, ID: %hu\n",
2193 config_item_name(item), lu_gp->lu_gp_id); 2223 config_item_name(item), lu_gp->lu_gp_id);
2194 2224 /*
2225 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
2226 * -> target_core_alua_lu_gp_release()
2227 */
2195 config_item_put(item); 2228 config_item_put(item);
2196 core_alua_free_lu_gp(lu_gp);
2197} 2229}
2198 2230
2199static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { 2231static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
@@ -2549,7 +2581,16 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
2549 NULL, 2581 NULL,
2550}; 2582};
2551 2583
2584static void target_core_alua_tg_pt_gp_release(struct config_item *item)
2585{
2586 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2587 struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2588
2589 core_alua_free_tg_pt_gp(tg_pt_gp);
2590}
2591
2552static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { 2592static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
2593 .release = target_core_alua_tg_pt_gp_release,
2553 .show_attribute = target_core_alua_tg_pt_gp_attr_show, 2594 .show_attribute = target_core_alua_tg_pt_gp_attr_show,
2554 .store_attribute = target_core_alua_tg_pt_gp_attr_store, 2595 .store_attribute = target_core_alua_tg_pt_gp_attr_store,
2555}; 2596};
@@ -2602,9 +2643,11 @@ static void target_core_alua_drop_tg_pt_gp(
2602 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" 2643 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
2603 " Group: alua/tg_pt_gps/%s, ID: %hu\n", 2644 " Group: alua/tg_pt_gps/%s, ID: %hu\n",
2604 config_item_name(item), tg_pt_gp->tg_pt_gp_id); 2645 config_item_name(item), tg_pt_gp->tg_pt_gp_id);
2605 2646 /*
2647 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
2648 * -> target_core_alua_tg_pt_gp_release().
2649 */
2606 config_item_put(item); 2650 config_item_put(item);
2607 core_alua_free_tg_pt_gp(tg_pt_gp);
2608} 2651}
2609 2652
2610static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { 2653static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
@@ -2771,13 +2814,11 @@ static void target_core_drop_subdev(
2771 struct se_subsystem_api *t; 2814 struct se_subsystem_api *t;
2772 struct config_item *df_item; 2815 struct config_item *df_item;
2773 struct config_group *dev_cg, *tg_pt_gp_cg; 2816 struct config_group *dev_cg, *tg_pt_gp_cg;
2774 int i, ret; 2817 int i;
2775 2818
2776 hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); 2819 hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
2777 2820
2778 if (mutex_lock_interruptible(&hba->hba_access_mutex)) 2821 mutex_lock(&hba->hba_access_mutex);
2779 goto out;
2780
2781 t = hba->transport; 2822 t = hba->transport;
2782 2823
2783 spin_lock(&se_global->g_device_lock); 2824 spin_lock(&se_global->g_device_lock);
@@ -2791,7 +2832,10 @@ static void target_core_drop_subdev(
2791 config_item_put(df_item); 2832 config_item_put(df_item);
2792 } 2833 }
2793 kfree(tg_pt_gp_cg->default_groups); 2834 kfree(tg_pt_gp_cg->default_groups);
2794 core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); 2835 /*
2836 * core_alua_free_tg_pt_gp() for ->default_tg_pt_gp is called
2837 * directly from target_core_alua_tg_pt_gp_release().
2838 */
2795 T10_ALUA(se_dev)->default_tg_pt_gp = NULL; 2839 T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
2796 2840
2797 dev_cg = &se_dev->se_dev_group; 2841 dev_cg = &se_dev->se_dev_group;
@@ -2800,38 +2844,12 @@ static void target_core_drop_subdev(
2800 dev_cg->default_groups[i] = NULL; 2844 dev_cg->default_groups[i] = NULL;
2801 config_item_put(df_item); 2845 config_item_put(df_item);
2802 } 2846 }
2803
2804 config_item_put(item);
2805 /* 2847 /*
2806 * This pointer will set when the storage is enabled with: 2848 * The releasing of se_dev and associated se_dev->se_dev_ptr is done
2807 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` 2849 * from target_core_dev_item_ops->release() ->target_core_dev_release().
2808 */ 2850 */
2809 if (se_dev->se_dev_ptr) { 2851 config_item_put(item);
2810 printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
2811 "virtual_device() for se_dev_ptr: %p\n",
2812 se_dev->se_dev_ptr);
2813
2814 ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
2815 if (ret < 0)
2816 goto hba_out;
2817 } else {
2818 /*
2819 * Release struct se_subsystem_dev->se_dev_su_ptr..
2820 */
2821 printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
2822 "device() for se_dev_su_ptr: %p\n",
2823 se_dev->se_dev_su_ptr);
2824
2825 t->free_device(se_dev->se_dev_su_ptr);
2826 }
2827
2828 printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
2829 "_dev_t: %p\n", se_dev);
2830
2831hba_out:
2832 mutex_unlock(&hba->hba_access_mutex); 2852 mutex_unlock(&hba->hba_access_mutex);
2833out:
2834 kfree(se_dev);
2835} 2853}
2836 2854
2837static struct configfs_group_operations target_core_hba_group_ops = { 2855static struct configfs_group_operations target_core_hba_group_ops = {
@@ -2914,6 +2932,13 @@ SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
2914 2932
2915CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); 2933CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
2916 2934
2935static void target_core_hba_release(struct config_item *item)
2936{
2937 struct se_hba *hba = container_of(to_config_group(item),
2938 struct se_hba, hba_group);
2939 core_delete_hba(hba);
2940}
2941
2917static struct configfs_attribute *target_core_hba_attrs[] = { 2942static struct configfs_attribute *target_core_hba_attrs[] = {
2918 &target_core_hba_hba_info.attr, 2943 &target_core_hba_hba_info.attr,
2919 &target_core_hba_hba_mode.attr, 2944 &target_core_hba_hba_mode.attr,
@@ -2921,6 +2946,7 @@ static struct configfs_attribute *target_core_hba_attrs[] = {
2921}; 2946};
2922 2947
2923static struct configfs_item_operations target_core_hba_item_ops = { 2948static struct configfs_item_operations target_core_hba_item_ops = {
2949 .release = target_core_hba_release,
2924 .show_attribute = target_core_hba_attr_show, 2950 .show_attribute = target_core_hba_attr_show,
2925 .store_attribute = target_core_hba_attr_store, 2951 .store_attribute = target_core_hba_attr_store,
2926}; 2952};
@@ -2997,10 +3023,11 @@ static void target_core_call_delhbafromtarget(
2997 struct config_group *group, 3023 struct config_group *group,
2998 struct config_item *item) 3024 struct config_item *item)
2999{ 3025{
3000 struct se_hba *hba = item_to_hba(item); 3026 /*
3001 3027 * core_delete_hba() is called from target_core_hba_item_ops->release()
3028 * -> target_core_hba_release()
3029 */
3002 config_item_put(item); 3030 config_item_put(item);
3003 core_delete_hba(hba);
3004} 3031}
3005 3032
3006static struct configfs_group_operations target_core_group_ops = { 3033static struct configfs_group_operations target_core_group_ops = {
@@ -3022,7 +3049,6 @@ static int target_core_init_configfs(void)
3022 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; 3049 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
3023 struct config_group *lu_gp_cg = NULL; 3050 struct config_group *lu_gp_cg = NULL;
3024 struct configfs_subsystem *subsys; 3051 struct configfs_subsystem *subsys;
3025 struct proc_dir_entry *scsi_target_proc = NULL;
3026 struct t10_alua_lu_gp *lu_gp; 3052 struct t10_alua_lu_gp *lu_gp;
3027 int ret; 3053 int ret;
3028 3054
@@ -3128,21 +3154,10 @@ static int target_core_init_configfs(void)
3128 if (core_dev_setup_virtual_lun0() < 0) 3154 if (core_dev_setup_virtual_lun0() < 0)
3129 goto out; 3155 goto out;
3130 3156
3131 scsi_target_proc = proc_mkdir("scsi_target", 0);
3132 if (!(scsi_target_proc)) {
3133 printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
3134 goto out;
3135 }
3136 ret = init_scsi_target_mib();
3137 if (ret < 0)
3138 goto out;
3139
3140 return 0; 3157 return 0;
3141 3158
3142out: 3159out:
3143 configfs_unregister_subsystem(subsys); 3160 configfs_unregister_subsystem(subsys);
3144 if (scsi_target_proc)
3145 remove_proc_entry("scsi_target", 0);
3146 core_dev_release_virtual_lun0(); 3161 core_dev_release_virtual_lun0();
3147 rd_module_exit(); 3162 rd_module_exit();
3148out_global: 3163out_global:
@@ -3178,8 +3193,7 @@ static void target_core_exit_configfs(void)
3178 config_item_put(item); 3193 config_item_put(item);
3179 } 3194 }
3180 kfree(lu_gp_cg->default_groups); 3195 kfree(lu_gp_cg->default_groups);
3181 core_alua_free_lu_gp(se_global->default_lu_gp); 3196 lu_gp_cg->default_groups = NULL;
3182 se_global->default_lu_gp = NULL;
3183 3197
3184 alua_cg = &se_global->alua_group; 3198 alua_cg = &se_global->alua_group;
3185 for (i = 0; alua_cg->default_groups[i]; i++) { 3199 for (i = 0; alua_cg->default_groups[i]; i++) {
@@ -3188,6 +3202,7 @@ static void target_core_exit_configfs(void)
3188 config_item_put(item); 3202 config_item_put(item);
3189 } 3203 }
3190 kfree(alua_cg->default_groups); 3204 kfree(alua_cg->default_groups);
3205 alua_cg->default_groups = NULL;
3191 3206
3192 hba_cg = &se_global->target_core_hbagroup; 3207 hba_cg = &se_global->target_core_hbagroup;
3193 for (i = 0; hba_cg->default_groups[i]; i++) { 3208 for (i = 0; hba_cg->default_groups[i]; i++) {
@@ -3196,20 +3211,20 @@ static void target_core_exit_configfs(void)
3196 config_item_put(item); 3211 config_item_put(item);
3197 } 3212 }
3198 kfree(hba_cg->default_groups); 3213 kfree(hba_cg->default_groups);
3199 3214 hba_cg->default_groups = NULL;
3200 for (i = 0; subsys->su_group.default_groups[i]; i++) { 3215 /*
3201 item = &subsys->su_group.default_groups[i]->cg_item; 3216 * We expect subsys->su_group.default_groups to be released
3202 subsys->su_group.default_groups[i] = NULL; 3217 * by configfs subsystem provider logic..
3203 config_item_put(item); 3218 */
3204 } 3219 configfs_unregister_subsystem(subsys);
3205 kfree(subsys->su_group.default_groups); 3220 kfree(subsys->su_group.default_groups);
3206 3221
3207 configfs_unregister_subsystem(subsys); 3222 core_alua_free_lu_gp(se_global->default_lu_gp);
3223 se_global->default_lu_gp = NULL;
3224
3208 printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" 3225 printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
3209 " Infrastructure\n"); 3226 " Infrastructure\n");
3210 3227
3211 remove_scsi_target_mib();
3212 remove_proc_entry("scsi_target", 0);
3213 core_dev_release_virtual_lun0(); 3228 core_dev_release_virtual_lun0();
3214 rd_module_exit(); 3229 rd_module_exit();
3215 release_se_global(); 3230 release_se_global();
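
The target_core_configfs.c hunks above share one theme: freeing of configfs-backed objects moves out of the ->drop_item() callbacks, where it raced against outstanding config_item references, and into configfs_item_operations->release(), which configfs invokes only after the final config_item_put(). A minimal sketch of the pattern, assuming an invented struct example_item rather than anything from target_core:

#include <linux/configfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct example_item {
	struct config_group group;
	/* driver-private state would live here */
};

static void example_release(struct config_item *item)
{
	struct example_item *ei = container_of(to_config_group(item),
					       struct example_item, group);

	/* runs only once the last reference to the config_item is dropped */
	kfree(ei);
}

static struct configfs_item_operations example_item_ops = {
	.release	= example_release,
};

static void example_drop_item(struct config_group *group,
			      struct config_item *item)
{
	/*
	 * Only drop the reference here; the actual free happens in
	 * example_release(), so concurrent reference holders stay safe.
	 */
	config_item_put(item);
}

With the free confined to ->release(), each drop_item() in the hunks shrinks to a bare config_item_put().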
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 317ce58d426d..5da051a07fa3 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -373,11 +373,11 @@ int core_update_device_list_for_node(
373 /* 373 /*
374 * deve->se_lun_acl will be NULL for demo-mode created LUNs 374 * deve->se_lun_acl will be NULL for demo-mode created LUNs
375 * that have not been explicitly converted to MappedLUNs -> 375 * that have not been explicitly converted to MappedLUNs ->
376 * struct se_lun_acl. 376 * struct se_lun_acl, but we remove deve->alua_port_list from
377 * port->sep_alua_list. This also means that active UAs and
378 * NodeACL context specific PR metadata for demo-mode
379 * MappedLUN *deve will be released below..
377 */ 380 */
378 if (!(deve->se_lun_acl))
379 return 0;
380
381 spin_lock_bh(&port->sep_alua_lock); 381 spin_lock_bh(&port->sep_alua_lock);
382 list_del(&deve->alua_port_list); 382 list_del(&deve->alua_port_list);
383 spin_unlock_bh(&port->sep_alua_lock); 383 spin_unlock_bh(&port->sep_alua_lock);
@@ -395,12 +395,14 @@ int core_update_device_list_for_node(
395 printk(KERN_ERR "struct se_dev_entry->se_lun_acl" 395 printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
396 " already set for demo mode -> explict" 396 " already set for demo mode -> explict"
397 " LUN ACL transition\n"); 397 " LUN ACL transition\n");
398 spin_unlock_irq(&nacl->device_list_lock);
398 return -1; 399 return -1;
399 } 400 }
400 if (deve->se_lun != lun) { 401 if (deve->se_lun != lun) {
401 printk(KERN_ERR "struct se_dev_entry->se_lun does" 402 printk(KERN_ERR "struct se_dev_entry->se_lun does"
402 " match passed struct se_lun for demo mode" 403 " match passed struct se_lun for demo mode"
403 " -> explict LUN ACL transition\n"); 404 " -> explict LUN ACL transition\n");
405 spin_unlock_irq(&nacl->device_list_lock);
404 return -1; 406 return -1;
405 } 407 }
406 deve->se_lun_acl = lun_acl; 408 deve->se_lun_acl = lun_acl;
@@ -865,9 +867,6 @@ static void se_dev_stop(struct se_device *dev)
865 } 867 }
866 } 868 }
867 spin_unlock(&hba->device_lock); 869 spin_unlock(&hba->device_lock);
868
869 while (atomic_read(&hba->dev_mib_access_count))
870 cpu_relax();
871} 870}
872 871
873int se_dev_check_online(struct se_device *dev) 872int se_dev_check_online(struct se_device *dev)
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 32b148d7e261..b65d1c8e7740 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -214,12 +214,22 @@ TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
214 214
215CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); 215CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
216 216
217static void target_fabric_mappedlun_release(struct config_item *item)
218{
219 struct se_lun_acl *lacl = container_of(to_config_group(item),
220 struct se_lun_acl, se_lun_group);
221 struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
222
223 core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
224}
225
217static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { 226static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
218 &target_fabric_mappedlun_write_protect.attr, 227 &target_fabric_mappedlun_write_protect.attr,
219 NULL, 228 NULL,
220}; 229};
221 230
222static struct configfs_item_operations target_fabric_mappedlun_item_ops = { 231static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
232 .release = target_fabric_mappedlun_release,
223 .show_attribute = target_fabric_mappedlun_attr_show, 233 .show_attribute = target_fabric_mappedlun_attr_show,
224 .store_attribute = target_fabric_mappedlun_attr_store, 234 .store_attribute = target_fabric_mappedlun_attr_store,
225 .allow_link = target_fabric_mappedlun_link, 235 .allow_link = target_fabric_mappedlun_link,
@@ -337,15 +347,21 @@ static void target_fabric_drop_mappedlun(
337 struct config_group *group, 347 struct config_group *group,
338 struct config_item *item) 348 struct config_item *item)
339{ 349{
340 struct se_lun_acl *lacl = container_of(to_config_group(item),
341 struct se_lun_acl, se_lun_group);
342 struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
343
344 config_item_put(item); 350 config_item_put(item);
345 core_dev_free_initiator_node_lun_acl(se_tpg, lacl); 351}
352
353static void target_fabric_nacl_base_release(struct config_item *item)
354{
355 struct se_node_acl *se_nacl = container_of(to_config_group(item),
356 struct se_node_acl, acl_group);
357 struct se_portal_group *se_tpg = se_nacl->se_tpg;
358 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
359
360 tf->tf_ops.fabric_drop_nodeacl(se_nacl);
346} 361}
347 362
348static struct configfs_item_operations target_fabric_nacl_base_item_ops = { 363static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
364 .release = target_fabric_nacl_base_release,
349 .show_attribute = target_fabric_nacl_base_attr_show, 365 .show_attribute = target_fabric_nacl_base_attr_show,
350 .store_attribute = target_fabric_nacl_base_attr_store, 366 .store_attribute = target_fabric_nacl_base_attr_store,
351}; 367};
@@ -404,9 +420,6 @@ static void target_fabric_drop_nodeacl(
404 struct config_group *group, 420 struct config_group *group,
405 struct config_item *item) 421 struct config_item *item)
406{ 422{
407 struct se_portal_group *se_tpg = container_of(group,
408 struct se_portal_group, tpg_acl_group);
409 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
410 struct se_node_acl *se_nacl = container_of(to_config_group(item), 423 struct se_node_acl *se_nacl = container_of(to_config_group(item),
411 struct se_node_acl, acl_group); 424 struct se_node_acl, acl_group);
412 struct config_item *df_item; 425 struct config_item *df_item;
@@ -419,9 +432,10 @@ static void target_fabric_drop_nodeacl(
419 nacl_cg->default_groups[i] = NULL; 432 nacl_cg->default_groups[i] = NULL;
420 config_item_put(df_item); 433 config_item_put(df_item);
421 } 434 }
422 435 /*
436 * struct se_node_acl free is done in target_fabric_nacl_base_release()
437 */
423 config_item_put(item); 438 config_item_put(item);
424 tf->tf_ops.fabric_drop_nodeacl(se_nacl);
425} 439}
426 440
427static struct configfs_group_operations target_fabric_nacl_group_ops = { 441static struct configfs_group_operations target_fabric_nacl_group_ops = {
@@ -437,7 +451,18 @@ TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
437 451
438CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); 452CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
439 453
454static void target_fabric_np_base_release(struct config_item *item)
455{
456 struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
457 struct se_tpg_np, tpg_np_group);
458 struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
459 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
460
461 tf->tf_ops.fabric_drop_np(se_tpg_np);
462}
463
440static struct configfs_item_operations target_fabric_np_base_item_ops = { 464static struct configfs_item_operations target_fabric_np_base_item_ops = {
465 .release = target_fabric_np_base_release,
441 .show_attribute = target_fabric_np_base_attr_show, 466 .show_attribute = target_fabric_np_base_attr_show,
442 .store_attribute = target_fabric_np_base_attr_store, 467 .store_attribute = target_fabric_np_base_attr_store,
443}; 468};
@@ -466,6 +491,7 @@ static struct config_group *target_fabric_make_np(
466 if (!(se_tpg_np) || IS_ERR(se_tpg_np)) 491 if (!(se_tpg_np) || IS_ERR(se_tpg_np))
467 return ERR_PTR(-EINVAL); 492 return ERR_PTR(-EINVAL);
468 493
494 se_tpg_np->tpg_np_parent = se_tpg;
469 config_group_init_type_name(&se_tpg_np->tpg_np_group, name, 495 config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
470 &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); 496 &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
471 497
@@ -476,14 +502,10 @@ static void target_fabric_drop_np(
476 struct config_group *group, 502 struct config_group *group,
477 struct config_item *item) 503 struct config_item *item)
478{ 504{
479 struct se_portal_group *se_tpg = container_of(group, 505 /*
480 struct se_portal_group, tpg_np_group); 506 * struct se_tpg_np is released via target_fabric_np_base_release()
481 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 507 */
482 struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
483 struct se_tpg_np, tpg_np_group);
484
485 config_item_put(item); 508 config_item_put(item);
486 tf->tf_ops.fabric_drop_np(se_tpg_np);
487} 509}
488 510
489static struct configfs_group_operations target_fabric_np_group_ops = { 511static struct configfs_group_operations target_fabric_np_group_ops = {
@@ -814,7 +836,18 @@ TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
814 */ 836 */
815CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); 837CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
816 838
839static void target_fabric_tpg_release(struct config_item *item)
840{
841 struct se_portal_group *se_tpg = container_of(to_config_group(item),
842 struct se_portal_group, tpg_group);
843 struct se_wwn *wwn = se_tpg->se_tpg_wwn;
844 struct target_fabric_configfs *tf = wwn->wwn_tf;
845
846 tf->tf_ops.fabric_drop_tpg(se_tpg);
847}
848
817static struct configfs_item_operations target_fabric_tpg_base_item_ops = { 849static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
850 .release = target_fabric_tpg_release,
818 .show_attribute = target_fabric_tpg_attr_show, 851 .show_attribute = target_fabric_tpg_attr_show,
819 .store_attribute = target_fabric_tpg_attr_store, 852 .store_attribute = target_fabric_tpg_attr_store,
820}; 853};
@@ -872,8 +905,6 @@ static void target_fabric_drop_tpg(
872 struct config_group *group, 905 struct config_group *group,
873 struct config_item *item) 906 struct config_item *item)
874{ 907{
875 struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
876 struct target_fabric_configfs *tf = wwn->wwn_tf;
877 struct se_portal_group *se_tpg = container_of(to_config_group(item), 908 struct se_portal_group *se_tpg = container_of(to_config_group(item),
878 struct se_portal_group, tpg_group); 909 struct se_portal_group, tpg_group);
879 struct config_group *tpg_cg = &se_tpg->tpg_group; 910 struct config_group *tpg_cg = &se_tpg->tpg_group;
@@ -890,15 +921,28 @@ static void target_fabric_drop_tpg(
890 } 921 }
891 922
892 config_item_put(item); 923 config_item_put(item);
893 tf->tf_ops.fabric_drop_tpg(se_tpg);
894} 924}
895 925
926static void target_fabric_release_wwn(struct config_item *item)
927{
928 struct se_wwn *wwn = container_of(to_config_group(item),
929 struct se_wwn, wwn_group);
930 struct target_fabric_configfs *tf = wwn->wwn_tf;
931
932 tf->tf_ops.fabric_drop_wwn(wwn);
933}
934
935static struct configfs_item_operations target_fabric_tpg_item_ops = {
936 .release = target_fabric_release_wwn,
937};
938
896static struct configfs_group_operations target_fabric_tpg_group_ops = { 939static struct configfs_group_operations target_fabric_tpg_group_ops = {
897 .make_group = target_fabric_make_tpg, 940 .make_group = target_fabric_make_tpg,
898 .drop_item = target_fabric_drop_tpg, 941 .drop_item = target_fabric_drop_tpg,
899}; 942};
900 943
901TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL); 944TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
945 NULL);
902 946
903/* End of tfc_tpg_cit */ 947/* End of tfc_tpg_cit */
904 948
@@ -932,13 +976,7 @@ static void target_fabric_drop_wwn(
932 struct config_group *group, 976 struct config_group *group,
933 struct config_item *item) 977 struct config_item *item)
934{ 978{
935 struct target_fabric_configfs *tf = container_of(group,
936 struct target_fabric_configfs, tf_group);
937 struct se_wwn *wwn = container_of(to_config_group(item),
938 struct se_wwn, wwn_group);
939
940 config_item_put(item); 979 config_item_put(item);
941 tf->tf_ops.fabric_drop_wwn(wwn);
942} 980}
943 981
944static struct configfs_group_operations target_fabric_wwn_group_ops = { 982static struct configfs_group_operations target_fabric_wwn_group_ops = {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c6e0d757e76e..67f0c09983c8 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -154,7 +154,7 @@ static struct se_device *iblock_create_virtdevice(
154 154
155 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, 155 bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
156 FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); 156 FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
157 if (!(bd)) 157 if (IS_ERR(bd))
158 goto failed; 158 goto failed;
159 /* 159 /*
160 * Setup the local scope queue_limits from struct request_queue->limits 160 * Setup the local scope queue_limits from struct request_queue->limits
@@ -220,8 +220,10 @@ static void iblock_free_device(void *p)
220{ 220{
221 struct iblock_dev *ib_dev = p; 221 struct iblock_dev *ib_dev = p;
222 222
223 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 223 if (ib_dev->ibd_bd != NULL)
224 bioset_free(ib_dev->ibd_bio_set); 224 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
225 if (ib_dev->ibd_bio_set != NULL)
226 bioset_free(ib_dev->ibd_bio_set);
225 kfree(ib_dev); 227 kfree(ib_dev);
226} 228}
227 229
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c
deleted file mode 100644
index d5a48aa0d2d1..000000000000
--- a/drivers/target/target_core_mib.c
+++ /dev/null
@@ -1,1078 +0,0 @@
1/*******************************************************************************
2 * Filename: target_core_mib.c
3 *
4 * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved.
5 * Copyright (c) 2007-2010 Rising Tide Systems
6 * Copyright (c) 2008-2010 Linux-iSCSI.org
7 *
8 * Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 *
24 ******************************************************************************/
25
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/delay.h>
30#include <linux/timer.h>
31#include <linux/string.h>
32#include <linux/version.h>
33#include <generated/utsrelease.h>
34#include <linux/utsname.h>
35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
37#include <linux/blkdev.h>
38#include <scsi/scsi.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_host.h>
41
42#include <target/target_core_base.h>
43#include <target/target_core_transport.h>
44#include <target/target_core_fabric_ops.h>
45#include <target/target_core_configfs.h>
46
47#include "target_core_hba.h"
48#include "target_core_mib.h"
49
50/* SCSI mib table index */
51static struct scsi_index_table scsi_index_table;
52
53#ifndef INITIAL_JIFFIES
54#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
55#endif
56
57/* SCSI Instance Table */
58#define SCSI_INST_SW_INDEX 1
59#define SCSI_TRANSPORT_INDEX 1
60
61#define NONE "None"
62#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
63
64static inline int list_is_first(const struct list_head *list,
65 const struct list_head *head)
66{
67 return list->prev == head;
68}
69
70static void *locate_hba_start(
71 struct seq_file *seq,
72 loff_t *pos)
73{
74 spin_lock(&se_global->g_device_lock);
75 return seq_list_start(&se_global->g_se_dev_list, *pos);
76}
77
78static void *locate_hba_next(
79 struct seq_file *seq,
80 void *v,
81 loff_t *pos)
82{
83 return seq_list_next(v, &se_global->g_se_dev_list, pos);
84}
85
86static void locate_hba_stop(struct seq_file *seq, void *v)
87{
88 spin_unlock(&se_global->g_device_lock);
89}
90
91/****************************************************************************
92 * SCSI MIB Tables
93 ****************************************************************************/
94
95/*
96 * SCSI Instance Table
97 */
98static void *scsi_inst_seq_start(
99 struct seq_file *seq,
100 loff_t *pos)
101{
102 spin_lock(&se_global->hba_lock);
103 return seq_list_start(&se_global->g_hba_list, *pos);
104}
105
106static void *scsi_inst_seq_next(
107 struct seq_file *seq,
108 void *v,
109 loff_t *pos)
110{
111 return seq_list_next(v, &se_global->g_hba_list, pos);
112}
113
114static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
115{
116 spin_unlock(&se_global->hba_lock);
117}
118
119static int scsi_inst_seq_show(struct seq_file *seq, void *v)
120{
121 struct se_hba *hba = list_entry(v, struct se_hba, hba_list);
122
123 if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
124 seq_puts(seq, "inst sw_indx\n");
125
126 seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
127 seq_printf(seq, "plugin: %s version: %s\n",
128 hba->transport->name, TARGET_CORE_VERSION);
129
130 return 0;
131}
132
133static const struct seq_operations scsi_inst_seq_ops = {
134 .start = scsi_inst_seq_start,
135 .next = scsi_inst_seq_next,
136 .stop = scsi_inst_seq_stop,
137 .show = scsi_inst_seq_show
138};
139
140static int scsi_inst_seq_open(struct inode *inode, struct file *file)
141{
142 return seq_open(file, &scsi_inst_seq_ops);
143}
144
145static const struct file_operations scsi_inst_seq_fops = {
146 .owner = THIS_MODULE,
147 .open = scsi_inst_seq_open,
148 .read = seq_read,
149 .llseek = seq_lseek,
150 .release = seq_release,
151};
152
153/*
154 * SCSI Device Table
155 */
156static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
157{
158 return locate_hba_start(seq, pos);
159}
160
161static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
162{
163 return locate_hba_next(seq, v, pos);
164}
165
166static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
167{
168 locate_hba_stop(seq, v);
169}
170
171static int scsi_dev_seq_show(struct seq_file *seq, void *v)
172{
173 struct se_hba *hba;
174 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
175 g_se_dev_list);
176 struct se_device *dev = se_dev->se_dev_ptr;
177 char str[28];
178 int k;
179
180 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
181 seq_puts(seq, "inst indx role ports\n");
182
183 if (!(dev))
184 return 0;
185
186 hba = dev->se_hba;
187 if (!(hba)) {
188 /* Log error ? */
189 return 0;
190 }
191
192 seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
193 dev->dev_index, "Target", dev->dev_port_count);
194
195 memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
196
197 /* vendor */
198 for (k = 0; k < 8; k++)
199 str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
200 DEV_T10_WWN(dev)->vendor[k] : 0x20;
201 str[k] = 0x20;
202
203 /* model */
204 for (k = 0; k < 16; k++)
205 str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
206 DEV_T10_WWN(dev)->model[k] : 0x20;
207 str[k + 9] = 0;
208
209 seq_printf(seq, "dev_alias: %s\n", str);
210
211 return 0;
212}
213
214static const struct seq_operations scsi_dev_seq_ops = {
215 .start = scsi_dev_seq_start,
216 .next = scsi_dev_seq_next,
217 .stop = scsi_dev_seq_stop,
218 .show = scsi_dev_seq_show
219};
220
221static int scsi_dev_seq_open(struct inode *inode, struct file *file)
222{
223 return seq_open(file, &scsi_dev_seq_ops);
224}
225
226static const struct file_operations scsi_dev_seq_fops = {
227 .owner = THIS_MODULE,
228 .open = scsi_dev_seq_open,
229 .read = seq_read,
230 .llseek = seq_lseek,
231 .release = seq_release,
232};
233
234/*
235 * SCSI Port Table
236 */
237static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
238{
239 return locate_hba_start(seq, pos);
240}
241
242static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
243{
244 return locate_hba_next(seq, v, pos);
245}
246
247static void scsi_port_seq_stop(struct seq_file *seq, void *v)
248{
249 locate_hba_stop(seq, v);
250}
251
252static int scsi_port_seq_show(struct seq_file *seq, void *v)
253{
254 struct se_hba *hba;
255 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
256 g_se_dev_list);
257 struct se_device *dev = se_dev->se_dev_ptr;
258 struct se_port *sep, *sep_tmp;
259
260 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
261 seq_puts(seq, "inst device indx role busy_count\n");
262
263 if (!(dev))
264 return 0;
265
266 hba = dev->se_hba;
267 if (!(hba)) {
268 /* Log error ? */
269 return 0;
270 }
271
272 /* FIXME: scsiPortBusyStatuses count */
273 spin_lock(&dev->se_port_lock);
274 list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
275 seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
276 dev->dev_index, sep->sep_index, "Device",
277 dev->dev_index, 0);
278 }
279 spin_unlock(&dev->se_port_lock);
280
281 return 0;
282}
283
284static const struct seq_operations scsi_port_seq_ops = {
285 .start = scsi_port_seq_start,
286 .next = scsi_port_seq_next,
287 .stop = scsi_port_seq_stop,
288 .show = scsi_port_seq_show
289};
290
291static int scsi_port_seq_open(struct inode *inode, struct file *file)
292{
293 return seq_open(file, &scsi_port_seq_ops);
294}
295
296static const struct file_operations scsi_port_seq_fops = {
297 .owner = THIS_MODULE,
298 .open = scsi_port_seq_open,
299 .read = seq_read,
300 .llseek = seq_lseek,
301 .release = seq_release,
302};
303
304/*
305 * SCSI Transport Table
306 */
307static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
308{
309 return locate_hba_start(seq, pos);
310}
311
312static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
313{
314 return locate_hba_next(seq, v, pos);
315}
316
317static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
318{
319 locate_hba_stop(seq, v);
320}
321
322static int scsi_transport_seq_show(struct seq_file *seq, void *v)
323{
324 struct se_hba *hba;
325 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
326 g_se_dev_list);
327 struct se_device *dev = se_dev->se_dev_ptr;
328 struct se_port *se, *se_tmp;
329 struct se_portal_group *tpg;
330 struct t10_wwn *wwn;
331 char buf[64];
332
333 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
334 seq_puts(seq, "inst device indx dev_name\n");
335
336 if (!(dev))
337 return 0;
338
339 hba = dev->se_hba;
340 if (!(hba)) {
341 /* Log error ? */
342 return 0;
343 }
344
345 wwn = DEV_T10_WWN(dev);
346
347 spin_lock(&dev->se_port_lock);
348 list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
349 tpg = se->sep_tpg;
350 sprintf(buf, "scsiTransport%s",
351 TPG_TFO(tpg)->get_fabric_name());
352
353 seq_printf(seq, "%u %s %u %s+%s\n",
354 hba->hba_index, /* scsiTransportIndex */
355 buf, /* scsiTransportType */
356 (TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
357 TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
358 0,
359 TPG_TFO(tpg)->tpg_get_wwn(tpg),
360 (strlen(wwn->unit_serial)) ?
361 /* scsiTransportDevName */
362 wwn->unit_serial : wwn->vendor);
363 }
364 spin_unlock(&dev->se_port_lock);
365
366 return 0;
367}
368
369static const struct seq_operations scsi_transport_seq_ops = {
370 .start = scsi_transport_seq_start,
371 .next = scsi_transport_seq_next,
372 .stop = scsi_transport_seq_stop,
373 .show = scsi_transport_seq_show
374};
375
376static int scsi_transport_seq_open(struct inode *inode, struct file *file)
377{
378 return seq_open(file, &scsi_transport_seq_ops);
379}
380
381static const struct file_operations scsi_transport_seq_fops = {
382 .owner = THIS_MODULE,
383 .open = scsi_transport_seq_open,
384 .read = seq_read,
385 .llseek = seq_lseek,
386 .release = seq_release,
387};
388
389/*
390 * SCSI Target Device Table
391 */
392static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
393{
394 return locate_hba_start(seq, pos);
395}
396
397static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
398{
399 return locate_hba_next(seq, v, pos);
400}
401
402static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
403{
404 locate_hba_stop(seq, v);
405}
406
407
408#define LU_COUNT 1 /* for now */
409static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
410{
411 struct se_hba *hba;
412 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
413 g_se_dev_list);
414 struct se_device *dev = se_dev->se_dev_ptr;
415 int non_accessible_lus = 0;
416 char status[16];
417
418 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
419 seq_puts(seq, "inst indx num_LUs status non_access_LUs"
420 " resets\n");
421
422 if (!(dev))
423 return 0;
424
425 hba = dev->se_hba;
426 if (!(hba)) {
427 /* Log error ? */
428 return 0;
429 }
430
431 switch (dev->dev_status) {
432 case TRANSPORT_DEVICE_ACTIVATED:
433 strcpy(status, "activated");
434 break;
435 case TRANSPORT_DEVICE_DEACTIVATED:
436 strcpy(status, "deactivated");
437 non_accessible_lus = 1;
438 break;
439 case TRANSPORT_DEVICE_SHUTDOWN:
440 strcpy(status, "shutdown");
441 non_accessible_lus = 1;
442 break;
443 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
444 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
445 strcpy(status, "offline");
446 non_accessible_lus = 1;
447 break;
448 default:
449 sprintf(status, "unknown(%d)", dev->dev_status);
450 non_accessible_lus = 1;
451 }
452
453 seq_printf(seq, "%u %u %u %s %u %u\n",
454 hba->hba_index, dev->dev_index, LU_COUNT,
455 status, non_accessible_lus, dev->num_resets);
456
457 return 0;
458}
459
460static const struct seq_operations scsi_tgt_dev_seq_ops = {
461 .start = scsi_tgt_dev_seq_start,
462 .next = scsi_tgt_dev_seq_next,
463 .stop = scsi_tgt_dev_seq_stop,
464 .show = scsi_tgt_dev_seq_show
465};
466
467static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file)
468{
469 return seq_open(file, &scsi_tgt_dev_seq_ops);
470}
471
472static const struct file_operations scsi_tgt_dev_seq_fops = {
473 .owner = THIS_MODULE,
474 .open = scsi_tgt_dev_seq_open,
475 .read = seq_read,
476 .llseek = seq_lseek,
477 .release = seq_release,
478};
479
480/*
481 * SCSI Target Port Table
482 */
483static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos)
484{
485 return locate_hba_start(seq, pos);
486}
487
488static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
489{
490 return locate_hba_next(seq, v, pos);
491}
492
493static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v)
494{
495 locate_hba_stop(seq, v);
496}
497
498static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v)
499{
500 struct se_hba *hba;
501 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
502 g_se_dev_list);
503 struct se_device *dev = se_dev->se_dev_ptr;
504 struct se_port *sep, *sep_tmp;
505 struct se_portal_group *tpg;
506 u32 rx_mbytes, tx_mbytes;
507 unsigned long long num_cmds;
508 char buf[64];
509
510 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
511 seq_puts(seq, "inst device indx name port_index in_cmds"
512 " write_mbytes read_mbytes hs_in_cmds\n");
513
514 if (!(dev))
515 return 0;
516
517 hba = dev->se_hba;
518 if (!(hba)) {
519 /* Log error ? */
520 return 0;
521 }
522
523 spin_lock(&dev->se_port_lock);
524 list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
525 tpg = sep->sep_tpg;
526 sprintf(buf, "%sPort#",
527 TPG_TFO(tpg)->get_fabric_name());
528
529 seq_printf(seq, "%u %u %u %s%d %s%s%d ",
530 hba->hba_index,
531 dev->dev_index,
532 sep->sep_index,
533 buf, sep->sep_index,
534 TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
535 TPG_TFO(tpg)->tpg_get_tag(tpg));
536
537 spin_lock(&sep->sep_lun->lun_sep_lock);
538 num_cmds = sep->sep_stats.cmd_pdus;
539 rx_mbytes = (sep->sep_stats.rx_data_octets >> 20);
540 tx_mbytes = (sep->sep_stats.tx_data_octets >> 20);
541 spin_unlock(&sep->sep_lun->lun_sep_lock);
542
543 seq_printf(seq, "%llu %u %u %u\n", num_cmds,
544 rx_mbytes, tx_mbytes, 0);
545 }
546 spin_unlock(&dev->se_port_lock);
547
548 return 0;
549}
550
551static const struct seq_operations scsi_tgt_port_seq_ops = {
552 .start = scsi_tgt_port_seq_start,
553 .next = scsi_tgt_port_seq_next,
554 .stop = scsi_tgt_port_seq_stop,
555 .show = scsi_tgt_port_seq_show
556};
557
558static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file)
559{
560 return seq_open(file, &scsi_tgt_port_seq_ops);
561}
562
563static const struct file_operations scsi_tgt_port_seq_fops = {
564 .owner = THIS_MODULE,
565 .open = scsi_tgt_port_seq_open,
566 .read = seq_read,
567 .llseek = seq_lseek,
568 .release = seq_release,
569};
570
571/*
572 * SCSI Authorized Initiator Table:
573 * It contains the SCSI Initiators authorized to be attached to one of the
574 * local Target ports.
575 * Iterates through all active TPGs and extracts the info from the ACLs
576 */
577static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos)
578{
579 spin_lock_bh(&se_global->se_tpg_lock);
580 return seq_list_start(&se_global->g_se_tpg_list, *pos);
581}
582
583static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v,
584 loff_t *pos)
585{
586 return seq_list_next(v, &se_global->g_se_tpg_list, pos);
587}
588
589static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v)
590{
591 spin_unlock_bh(&se_global->se_tpg_lock);
592}
593
594static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v)
595{
596 struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
597 se_tpg_list);
598 struct se_dev_entry *deve;
599 struct se_lun *lun;
600 struct se_node_acl *se_nacl;
601 int j;
602
603 if (list_is_first(&se_tpg->se_tpg_list,
604 &se_global->g_se_tpg_list))
605 seq_puts(seq, "inst dev port indx dev_or_port intr_name "
606 "map_indx att_count num_cmds read_mbytes "
607 "write_mbytes hs_num_cmds creation_time row_status\n");
608
609 if (!(se_tpg))
610 return 0;
611
612 spin_lock(&se_tpg->acl_node_lock);
613 list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) {
614
615 atomic_inc(&se_nacl->mib_ref_count);
616 smp_mb__after_atomic_inc();
617 spin_unlock(&se_tpg->acl_node_lock);
618
619 spin_lock_irq(&se_nacl->device_list_lock);
620 for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
621 deve = &se_nacl->device_list[j];
622 if (!(deve->lun_flags &
623 TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
624 (!deve->se_lun))
625 continue;
626 lun = deve->se_lun;
627 if (!lun->lun_se_dev)
628 continue;
629
630 seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u"
631 " %u %s\n",
632 /* scsiInstIndex */
633 (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
634 TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
635 0,
636 /* scsiDeviceIndex */
637 lun->lun_se_dev->dev_index,
638 /* scsiAuthIntrTgtPortIndex */
639 TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
640 /* scsiAuthIntrIndex */
641 se_nacl->acl_index,
642 /* scsiAuthIntrDevOrPort */
643 1,
644 /* scsiAuthIntrName */
645 se_nacl->initiatorname[0] ?
646 se_nacl->initiatorname : NONE,
647 /* FIXME: scsiAuthIntrLunMapIndex */
648 0,
649 /* scsiAuthIntrAttachedTimes */
650 deve->attach_count,
651 /* scsiAuthIntrOutCommands */
652 deve->total_cmds,
653 /* scsiAuthIntrReadMegaBytes */
654 (u32)(deve->read_bytes >> 20),
655 /* scsiAuthIntrWrittenMegaBytes */
656 (u32)(deve->write_bytes >> 20),
657 /* FIXME: scsiAuthIntrHSOutCommands */
658 0,
659 /* scsiAuthIntrLastCreation */
660 (u32)(((u32)deve->creation_time -
661 INITIAL_JIFFIES) * 100 / HZ),
662 /* FIXME: scsiAuthIntrRowStatus */
663 "Ready");
664 }
665 spin_unlock_irq(&se_nacl->device_list_lock);
666
667 spin_lock(&se_tpg->acl_node_lock);
668 atomic_dec(&se_nacl->mib_ref_count);
669 smp_mb__after_atomic_dec();
670 }
671 spin_unlock(&se_tpg->acl_node_lock);
672
673 return 0;
674}
675
676static const struct seq_operations scsi_auth_intr_seq_ops = {
677 .start = scsi_auth_intr_seq_start,
678 .next = scsi_auth_intr_seq_next,
679 .stop = scsi_auth_intr_seq_stop,
680 .show = scsi_auth_intr_seq_show
681};
682
683static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file)
684{
685 return seq_open(file, &scsi_auth_intr_seq_ops);
686}
687
688static const struct file_operations scsi_auth_intr_seq_fops = {
689 .owner = THIS_MODULE,
690 .open = scsi_auth_intr_seq_open,
691 .read = seq_read,
692 .llseek = seq_lseek,
693 .release = seq_release,
694};
695
696/*
697 * SCSI Attached Initiator Port Table:
698 * It lists the SCSI Initiators attached to one of the local Target ports.
699 * Iterates through all active TPGs and uses active sessions from each TPG
700 * to list the info for this table.
701 */
702static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos)
703{
704 spin_lock_bh(&se_global->se_tpg_lock);
705 return seq_list_start(&se_global->g_se_tpg_list, *pos);
706}
707
708static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v,
709 loff_t *pos)
710{
711 return seq_list_next(v, &se_global->g_se_tpg_list, pos);
712}
713
714static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v)
715{
716 spin_unlock_bh(&se_global->se_tpg_lock);
717}
718
719static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v)
720{
721 struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
722 se_tpg_list);
723 struct se_dev_entry *deve;
724 struct se_lun *lun;
725 struct se_node_acl *se_nacl;
726 struct se_session *se_sess;
727 unsigned char buf[64];
728 int j;
729
730 if (list_is_first(&se_tpg->se_tpg_list,
731 &se_global->g_se_tpg_list))
732 seq_puts(seq, "inst dev port indx port_auth_indx port_name"
733 " port_ident\n");
734
735 if (!(se_tpg))
736 return 0;
737
738 spin_lock(&se_tpg->session_lock);
739 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
740 if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) ||
741 (!se_sess->se_node_acl) ||
742 (!se_sess->se_node_acl->device_list))
743 continue;
744
745 atomic_inc(&se_sess->mib_ref_count);
746 smp_mb__after_atomic_inc();
747 se_nacl = se_sess->se_node_acl;
748 atomic_inc(&se_nacl->mib_ref_count);
749 smp_mb__after_atomic_inc();
750 spin_unlock(&se_tpg->session_lock);
751
752 spin_lock_irq(&se_nacl->device_list_lock);
753 for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
754 deve = &se_nacl->device_list[j];
755 if (!(deve->lun_flags &
756 TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
757 (!deve->se_lun))
758 continue;
759
760 lun = deve->se_lun;
761 if (!lun->lun_se_dev)
762 continue;
763
764 memset(buf, 0, 64);
765 if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL)
766 TPG_TFO(se_tpg)->sess_get_initiator_sid(
767 se_sess, (unsigned char *)&buf[0], 64);
768
769 seq_printf(seq, "%u %u %u %u %u %s+i+%s\n",
770 /* scsiInstIndex */
771 (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
772 TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
773 0,
774 /* scsiDeviceIndex */
775 lun->lun_se_dev->dev_index,
776 /* scsiPortIndex */
777 TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
778 /* scsiAttIntrPortIndex */
779 (TPG_TFO(se_tpg)->sess_get_index != NULL) ?
780 TPG_TFO(se_tpg)->sess_get_index(se_sess) :
781 0,
782 /* scsiAttIntrPortAuthIntrIdx */
783 se_nacl->acl_index,
784 /* scsiAttIntrPortName */
785 se_nacl->initiatorname[0] ?
786 se_nacl->initiatorname : NONE,
787 /* scsiAttIntrPortIdentifier */
788 buf);
789 }
790 spin_unlock_irq(&se_nacl->device_list_lock);
791
792 spin_lock(&se_tpg->session_lock);
793 atomic_dec(&se_nacl->mib_ref_count);
794 smp_mb__after_atomic_dec();
795 atomic_dec(&se_sess->mib_ref_count);
796 smp_mb__after_atomic_dec();
797 }
798 spin_unlock(&se_tpg->session_lock);
799
800 return 0;
801}
802
803static const struct seq_operations scsi_att_intr_port_seq_ops = {
804 .start = scsi_att_intr_port_seq_start,
805 .next = scsi_att_intr_port_seq_next,
806 .stop = scsi_att_intr_port_seq_stop,
807 .show = scsi_att_intr_port_seq_show
808};
809
810static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file)
811{
812 return seq_open(file, &scsi_att_intr_port_seq_ops);
813}
814
815static const struct file_operations scsi_att_intr_port_seq_fops = {
816 .owner = THIS_MODULE,
817 .open = scsi_att_intr_port_seq_open,
818 .read = seq_read,
819 .llseek = seq_lseek,
820 .release = seq_release,
821};
822
823/*
824 * SCSI Logical Unit Table
825 */
826static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos)
827{
828 return locate_hba_start(seq, pos);
829}
830
831static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
832{
833 return locate_hba_next(seq, v, pos);
834}
835
836static void scsi_lu_seq_stop(struct seq_file *seq, void *v)
837{
838 locate_hba_stop(seq, v);
839}
840
841#define SCSI_LU_INDEX 1
842static int scsi_lu_seq_show(struct seq_file *seq, void *v)
843{
844 struct se_hba *hba;
845 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
846 g_se_dev_list);
847 struct se_device *dev = se_dev->se_dev_ptr;
848 int j;
849 char str[28];
850
851 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
852 seq_puts(seq, "inst dev indx LUN lu_name vend prod rev"
853 " dev_type status state-bit num_cmds read_mbytes"
854 " write_mbytes resets full_stat hs_num_cmds creation_time\n");
855
856 if (!(dev))
857 return 0;
858
859 hba = dev->se_hba;
860 if (!(hba)) {
861 /* Log error ? */
862 return 0;
863 }
864
865 /* Fix LU state, if we can read it from the device */
866 seq_printf(seq, "%u %u %u %llu %s", hba->hba_index,
867 dev->dev_index, SCSI_LU_INDEX,
868 (unsigned long long)0, /* FIXME: scsiLuDefaultLun */
869 (strlen(DEV_T10_WWN(dev)->unit_serial)) ?
870 /* scsiLuWwnName */
871 (char *)&DEV_T10_WWN(dev)->unit_serial[0] :
872 "None");
873
874 memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
875 /* scsiLuVendorId */
876 for (j = 0; j < 8; j++)
877 str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
878 DEV_T10_WWN(dev)->vendor[j] : 0x20;
879 str[8] = 0;
880 seq_printf(seq, " %s", str);
881
882 /* scsiLuProductId */
883 for (j = 0; j < 16; j++)
884 str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
885 DEV_T10_WWN(dev)->model[j] : 0x20;
886 str[16] = 0;
887 seq_printf(seq, " %s", str);
888
889 /* scsiLuRevisionId */
890 for (j = 0; j < 4; j++)
891 str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
892 DEV_T10_WWN(dev)->revision[j] : 0x20;
893 str[4] = 0;
894 seq_printf(seq, " %s", str);
895
896 seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n",
897 /* scsiLuPeripheralType */
898 TRANSPORT(dev)->get_device_type(dev),
899 (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
900 "available" : "notavailable", /* scsiLuStatus */
901 "exposed", /* scsiLuState */
902 (unsigned long long)dev->num_cmds,
903 /* scsiLuReadMegaBytes */
904 (u32)(dev->read_bytes >> 20),
905 /* scsiLuWrittenMegaBytes */
906 (u32)(dev->write_bytes >> 20),
907 dev->num_resets, /* scsiLuInResets */
908 0, /* scsiLuOutTaskSetFullStatus */
909 0, /* scsiLuHSInCommands */
910 (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) *
911 100 / HZ));
912
913 return 0;
914}
915
916static const struct seq_operations scsi_lu_seq_ops = {
917 .start = scsi_lu_seq_start,
918 .next = scsi_lu_seq_next,
919 .stop = scsi_lu_seq_stop,
920 .show = scsi_lu_seq_show
921};
922
923static int scsi_lu_seq_open(struct inode *inode, struct file *file)
924{
925 return seq_open(file, &scsi_lu_seq_ops);
926}
927
928static const struct file_operations scsi_lu_seq_fops = {
929 .owner = THIS_MODULE,
930 .open = scsi_lu_seq_open,
931 .read = seq_read,
932 .llseek = seq_lseek,
933 .release = seq_release,
934};
935
936/****************************************************************************/
937
938/*
939 * Remove proc fs entries
940 */
941void remove_scsi_target_mib(void)
942{
943 remove_proc_entry("scsi_target/mib/scsi_inst", NULL);
944 remove_proc_entry("scsi_target/mib/scsi_dev", NULL);
945 remove_proc_entry("scsi_target/mib/scsi_port", NULL);
946 remove_proc_entry("scsi_target/mib/scsi_transport", NULL);
947 remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL);
948 remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL);
949 remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL);
950 remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL);
951 remove_proc_entry("scsi_target/mib/scsi_lu", NULL);
952 remove_proc_entry("scsi_target/mib", NULL);
953}
954
955/*
956 * Create proc fs entries for the mib tables
957 */
958int init_scsi_target_mib(void)
959{
960 struct proc_dir_entry *dir_entry;
961 struct proc_dir_entry *scsi_inst_entry;
962 struct proc_dir_entry *scsi_dev_entry;
963 struct proc_dir_entry *scsi_port_entry;
964 struct proc_dir_entry *scsi_transport_entry;
965 struct proc_dir_entry *scsi_tgt_dev_entry;
966 struct proc_dir_entry *scsi_tgt_port_entry;
967 struct proc_dir_entry *scsi_auth_intr_entry;
968 struct proc_dir_entry *scsi_att_intr_port_entry;
969 struct proc_dir_entry *scsi_lu_entry;
970
971 dir_entry = proc_mkdir("scsi_target/mib", NULL);
972 if (!(dir_entry)) {
973 printk(KERN_ERR "proc_mkdir() failed.\n");
974 return -1;
975 }
976
977 scsi_inst_entry =
978 create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL);
979 if (scsi_inst_entry)
980 scsi_inst_entry->proc_fops = &scsi_inst_seq_fops;
981 else
982 goto error;
983
984 scsi_dev_entry =
985 create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL);
986 if (scsi_dev_entry)
987 scsi_dev_entry->proc_fops = &scsi_dev_seq_fops;
988 else
989 goto error;
990
991 scsi_port_entry =
992 create_proc_entry("scsi_target/mib/scsi_port", 0, NULL);
993 if (scsi_port_entry)
994 scsi_port_entry->proc_fops = &scsi_port_seq_fops;
995 else
996 goto error;
997
998 scsi_transport_entry =
999 create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL);
1000 if (scsi_transport_entry)
1001 scsi_transport_entry->proc_fops = &scsi_transport_seq_fops;
1002 else
1003 goto error;
1004
1005 scsi_tgt_dev_entry =
1006 create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL);
1007 if (scsi_tgt_dev_entry)
1008 scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops;
1009 else
1010 goto error;
1011
1012 scsi_tgt_port_entry =
1013 create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL);
1014 if (scsi_tgt_port_entry)
1015 scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops;
1016 else
1017 goto error;
1018
1019 scsi_auth_intr_entry =
1020 create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL);
1021 if (scsi_auth_intr_entry)
1022 scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops;
1023 else
1024 goto error;
1025
1026 scsi_att_intr_port_entry =
1027 create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL);
1028 if (scsi_att_intr_port_entry)
1029 scsi_att_intr_port_entry->proc_fops =
1030 &scsi_att_intr_port_seq_fops;
1031 else
1032 goto error;
1033
1034 scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL);
1035 if (scsi_lu_entry)
1036 scsi_lu_entry->proc_fops = &scsi_lu_seq_fops;
1037 else
1038 goto error;
1039
1040 return 0;
1041
1042error:
1043 printk(KERN_ERR "create_proc_entry() failed.\n");
1044 remove_scsi_target_mib();
1045 return -1;
1046}
1047
1048/*
1049 * Initialize the index table for allocating unique row indexes to various mib
1050 * tables
1051 */
1052void init_scsi_index_table(void)
1053{
1054 memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
1055 spin_lock_init(&scsi_index_table.lock);
1056}
1057
1058/*
1059 * Allocate a new row index for the entry type specified
1060 */
1061u32 scsi_get_new_index(scsi_index_t type)
1062{
1063 u32 new_index;
1064
1065 if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
1066 printk(KERN_ERR "Invalid index type %d\n", type);
1067 return -1;
1068 }
1069
1070 spin_lock(&scsi_index_table.lock);
1071 new_index = ++scsi_index_table.scsi_mib_index[type];
1072 if (new_index == 0)
1073 new_index = ++scsi_index_table.scsi_mib_index[type];
1074 spin_unlock(&scsi_index_table.lock);
1075
1076 return new_index;
1077}
1078EXPORT_SYMBOL(scsi_get_new_index);
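A minimal usage sketch of the row-index allocator exported above. The index type constants come from target_core_mib.h, removed in the next hunk; the wrapper name below is hypothetical and only illustrates the call:

	#include "target_core_mib.h"

	/* Hand out a unique, non-zero row index for a new device table entry. */
	static u32 alloc_device_row_index(void)
	{
		return scsi_get_new_index(SCSI_DEVICE_INDEX);
	}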
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h
deleted file mode 100644
index 277204633850..000000000000
--- a/drivers/target/target_core_mib.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef TARGET_CORE_MIB_H
2#define TARGET_CORE_MIB_H
3
4typedef enum {
5 SCSI_INST_INDEX,
6 SCSI_DEVICE_INDEX,
7 SCSI_AUTH_INTR_INDEX,
8 SCSI_INDEX_TYPE_MAX
9} scsi_index_t;
10
11struct scsi_index_table {
12 spinlock_t lock;
13 u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
14} ____cacheline_aligned;
15
16/* SCSI Port stats */
17struct scsi_port_stats {
18 u64 cmd_pdus;
19 u64 tx_data_octets;
20 u64 rx_data_octets;
21} ____cacheline_aligned;
22
23extern int init_scsi_target_mib(void);
24extern void remove_scsi_target_mib(void);
25extern void init_scsi_index_table(void);
26extern u32 scsi_get_new_index(scsi_index_t);
27
28#endif /*** TARGET_CORE_MIB_H ***/
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 742d24609a9b..f2a08477a68c 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -462,8 +462,8 @@ static struct se_device *pscsi_create_type_disk(
462 */ 462 */
463 bd = blkdev_get_by_path(se_dev->se_dev_udev_path, 463 bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
464 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); 464 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
465 if (!(bd)) { 465 if (IS_ERR(bd)) {
466 printk("pSCSI: blkdev_get_by_path() failed\n"); 466 printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
467 scsi_device_put(sd); 467 scsi_device_put(sd);
468 return NULL; 468 return NULL;
469 } 469 }
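The hunk above fixes the error check after blkdev_get_by_path(): that helper returns an ERR_PTR()-encoded pointer on failure, never NULL, so the old !(bd) test could not trigger. A minimal sketch of the convention (needs <linux/fs.h> and <linux/err.h>; path, holder, ret and the out label are placeholders):

	struct block_device *bd;
	int ret;

	bd = blkdev_get_by_path(path, FMODE_WRITE | FMODE_READ | FMODE_EXCL, holder);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);	/* e.g. -ENOENT or -ENOTBLK */
		goto out;
	}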
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index abfa81a57115..c26f67467623 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -275,7 +275,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
275 spin_lock_init(&acl->device_list_lock); 275 spin_lock_init(&acl->device_list_lock);
276 spin_lock_init(&acl->nacl_sess_lock); 276 spin_lock_init(&acl->nacl_sess_lock);
277 atomic_set(&acl->acl_pr_ref_count, 0); 277 atomic_set(&acl->acl_pr_ref_count, 0);
278 atomic_set(&acl->mib_ref_count, 0);
279 acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); 278 acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
280 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 279 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
281 acl->se_tpg = tpg; 280 acl->se_tpg = tpg;
@@ -318,12 +317,6 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
318 cpu_relax(); 317 cpu_relax();
319} 318}
320 319
321void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
322{
323 while (atomic_read(&nacl->mib_ref_count) != 0)
324 cpu_relax();
325}
326
327void core_tpg_clear_object_luns(struct se_portal_group *tpg) 320void core_tpg_clear_object_luns(struct se_portal_group *tpg)
328{ 321{
329 int i, ret; 322 int i, ret;
@@ -480,7 +473,6 @@ int core_tpg_del_initiator_node_acl(
480 spin_unlock_bh(&tpg->session_lock); 473 spin_unlock_bh(&tpg->session_lock);
481 474
482 core_tpg_wait_for_nacl_pr_ref(acl); 475 core_tpg_wait_for_nacl_pr_ref(acl);
483 core_tpg_wait_for_mib_ref(acl);
484 core_clear_initiator_node_from_tpg(acl, tpg); 476 core_clear_initiator_node_from_tpg(acl, tpg);
485 core_free_device_list_for_node(acl, tpg); 477 core_free_device_list_for_node(acl, tpg);
486 478
@@ -701,6 +693,8 @@ EXPORT_SYMBOL(core_tpg_register);
701 693
702int core_tpg_deregister(struct se_portal_group *se_tpg) 694int core_tpg_deregister(struct se_portal_group *se_tpg)
703{ 695{
696 struct se_node_acl *nacl, *nacl_tmp;
697
704 printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" 698 printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
705 " for endpoint: %s Portal Tag %u\n", 699 " for endpoint: %s Portal Tag %u\n",
706 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? 700 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
@@ -714,6 +708,25 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
714 708
715 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) 709 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
716 cpu_relax(); 710 cpu_relax();
711 /*
712 * Release any remaining demo-mode generated se_node_acl that have
713 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
714 * in transport_deregister_session().
715 */
716 spin_lock_bh(&se_tpg->acl_node_lock);
717 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
718 acl_list) {
719 list_del(&nacl->acl_list);
720 se_tpg->num_node_acls--;
721 spin_unlock_bh(&se_tpg->acl_node_lock);
722
723 core_tpg_wait_for_nacl_pr_ref(nacl);
724 core_free_device_list_for_node(nacl, se_tpg);
725 TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
726
727 spin_lock_bh(&se_tpg->acl_node_lock);
728 }
729 spin_unlock_bh(&se_tpg->acl_node_lock);
717 730
718 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) 731 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
719 core_tpg_release_virtual_lun0(se_tpg); 732 core_tpg_release_virtual_lun0(se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 28b6292ff298..236e22d8cfae 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -379,6 +379,40 @@ void release_se_global(void)
379 se_global = NULL; 379 se_global = NULL;
380} 380}
381 381
382/* SCSI statistics table index */
383static struct scsi_index_table scsi_index_table;
384
385/*
386 * Initialize the index table for allocating unique row indexes to various mib
387 * tables.
388 */
389void init_scsi_index_table(void)
390{
391 memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
392 spin_lock_init(&scsi_index_table.lock);
393}
394
395/*
396 * Allocate a new row index for the entry type specified
397 */
398u32 scsi_get_new_index(scsi_index_t type)
399{
400 u32 new_index;
401
402 if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
403 printk(KERN_ERR "Invalid index type %d\n", type);
404 return -EINVAL;
405 }
406
407 spin_lock(&scsi_index_table.lock);
408 new_index = ++scsi_index_table.scsi_mib_index[type];
409 if (new_index == 0)
410 new_index = ++scsi_index_table.scsi_mib_index[type];
411 spin_unlock(&scsi_index_table.lock);
412
413 return new_index;
414}
415
382void transport_init_queue_obj(struct se_queue_obj *qobj) 416void transport_init_queue_obj(struct se_queue_obj *qobj)
383{ 417{
384 atomic_set(&qobj->queue_cnt, 0); 418 atomic_set(&qobj->queue_cnt, 0);
@@ -437,7 +471,6 @@ struct se_session *transport_init_session(void)
437 } 471 }
438 INIT_LIST_HEAD(&se_sess->sess_list); 472 INIT_LIST_HEAD(&se_sess->sess_list);
439 INIT_LIST_HEAD(&se_sess->sess_acl_list); 473 INIT_LIST_HEAD(&se_sess->sess_acl_list);
440 atomic_set(&se_sess->mib_ref_count, 0);
441 474
442 return se_sess; 475 return se_sess;
443} 476}
@@ -546,12 +579,6 @@ void transport_deregister_session(struct se_session *se_sess)
546 transport_free_session(se_sess); 579 transport_free_session(se_sess);
547 return; 580 return;
548 } 581 }
549 /*
550 * Wait for possible reference in drivers/target/target_core_mib.c:
551 * scsi_att_intr_port_seq_show()
552 */
553 while (atomic_read(&se_sess->mib_ref_count) != 0)
554 cpu_relax();
555 582
556 spin_lock_bh(&se_tpg->session_lock); 583 spin_lock_bh(&se_tpg->session_lock);
557 list_del(&se_sess->sess_list); 584 list_del(&se_sess->sess_list);
@@ -574,7 +601,6 @@ void transport_deregister_session(struct se_session *se_sess)
574 spin_unlock_bh(&se_tpg->acl_node_lock); 601 spin_unlock_bh(&se_tpg->acl_node_lock);
575 602
576 core_tpg_wait_for_nacl_pr_ref(se_nacl); 603 core_tpg_wait_for_nacl_pr_ref(se_nacl);
577 core_tpg_wait_for_mib_ref(se_nacl);
578 core_free_device_list_for_node(se_nacl, se_tpg); 604 core_free_device_list_for_node(se_nacl, se_tpg);
579 TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, 605 TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
580 se_nacl); 606 se_nacl);
@@ -4827,6 +4853,8 @@ static int transport_do_se_mem_map(
4827 4853
4828 return ret; 4854 return ret;
4829 } 4855 }
4856
4857 BUG_ON(list_empty(se_mem_list));
4830 /* 4858 /*
4831 * This is the normal path for all normal non BIDI and BIDI-COMMAND 4859 * This is the normal path for all normal non BIDI and BIDI-COMMAND
4832 * WRITE payloads.. If we need to do BIDI READ passthrough for 4860 * WRITE payloads.. If we need to do BIDI READ passthrough for
@@ -5008,7 +5036,9 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
5008 struct se_mem *se_mem = NULL, *se_mem_lout = NULL; 5036 struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
5009 u32 se_mem_cnt = 0, task_offset = 0; 5037 u32 se_mem_cnt = 0, task_offset = 0;
5010 5038
5011 BUG_ON(list_empty(cmd->t_task->t_mem_list)); 5039 if (!list_empty(T_TASK(cmd)->t_mem_list))
5040 se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
5041 struct se_mem, se_list);
5012 5042
5013 ret = transport_do_se_mem_map(dev, task, 5043 ret = transport_do_se_mem_map(dev, task,
5014 cmd->t_task->t_mem_list, NULL, se_mem, 5044 cmd->t_task->t_mem_list, NULL, se_mem,
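The final hunk above drops the unconditional BUG_ON() in transport_map_control_cmd_to_task() and instead dereferences the first memory descriptor only when the list is non-empty; transport_do_se_mem_map() gains its own BUG_ON() in the earlier hunk. The guarded-lookup idiom, with mem_list as a placeholder and list_first_entry() equivalent to the list_entry(...->next, ...) form used in the patch:

	struct se_mem *se_mem = NULL;

	if (!list_empty(mem_list))
		se_mem = list_first_entry(mem_list, struct se_mem, se_list);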
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile
index e6bed5f177ff..d79e7e9bf9d2 100644
--- a/drivers/tty/hvc/Makefile
+++ b/drivers/tty/hvc/Makefile
@@ -10,4 +10,3 @@ obj-$(CONFIG_HVC_XEN) += hvc_xen.o
10obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o 10obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o
11obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o 11obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o
12obj-$(CONFIG_HVCS) += hvcs.o 12obj-$(CONFIG_HVCS) += hvcs.o
13obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 44b8412a04e8..aa2e5d3eb01a 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2414,6 +2414,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
2414 2414
2415 gsm->initiator = c->initiator; 2415 gsm->initiator = c->initiator;
2416 gsm->mru = c->mru; 2416 gsm->mru = c->mru;
2417 gsm->mtu = c->mtu;
2417 gsm->encoding = c->encapsulation; 2418 gsm->encoding = c->encapsulation;
2418 gsm->adaption = c->adaption; 2419 gsm->adaption = c->adaption;
2419 gsm->n2 = c->n2; 2420 gsm->n2 = c->n2;
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index be0ebce36e54..de0160e3f8c4 100644
--- a/drivers/tty/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -262,7 +262,7 @@ static void status_handle(struct m68k_serial *info, unsigned short status)
262 262
263static void receive_chars(struct m68k_serial *info, unsigned short rx) 263static void receive_chars(struct m68k_serial *info, unsigned short rx)
264{ 264{
265 struct tty_struct *tty = info->port.tty; 265 struct tty_struct *tty = info->tty;
266 m68328_uart *uart = &uart_addr[info->line]; 266 m68328_uart *uart = &uart_addr[info->line];
267 unsigned char ch, flag; 267 unsigned char ch, flag;
268 268
@@ -329,7 +329,7 @@ static void transmit_chars(struct m68k_serial *info)
329 goto clear_and_return; 329 goto clear_and_return;
330 } 330 }
331 331
332 if((info->xmit_cnt <= 0) || info->port.tty->stopped) { 332 if((info->xmit_cnt <= 0) || info->tty->stopped) {
333 /* That's peculiar... TX ints off */ 333 /* That's peculiar... TX ints off */
334 uart->ustcnt &= ~USTCNT_TX_INTR_MASK; 334 uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
335 goto clear_and_return; 335 goto clear_and_return;
@@ -383,7 +383,7 @@ static void do_softint(struct work_struct *work)
383 struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue); 383 struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue);
384 struct tty_struct *tty; 384 struct tty_struct *tty;
385 385
386 tty = info->port.tty; 386 tty = info->tty;
387 if (!tty) 387 if (!tty)
388 return; 388 return;
389#if 0 389#if 0
@@ -407,7 +407,7 @@ static void do_serial_hangup(struct work_struct *work)
407 struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue_hangup); 407 struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue_hangup);
408 struct tty_struct *tty; 408 struct tty_struct *tty;
409 409
410 tty = info->port.tty; 410 tty = info->tty;
411 if (!tty) 411 if (!tty)
412 return; 412 return;
413 413
@@ -451,8 +451,8 @@ static int startup(struct m68k_serial * info)
451 uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK; 451 uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK;
452#endif 452#endif
453 453
454 if (info->port.tty) 454 if (info->tty)
455 clear_bit(TTY_IO_ERROR, &info->port.tty->flags); 455 clear_bit(TTY_IO_ERROR, &info->tty->flags);
456 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 456 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
457 457
458 /* 458 /*
@@ -486,8 +486,8 @@ static void shutdown(struct m68k_serial * info)
486 info->xmit_buf = 0; 486 info->xmit_buf = 0;
487 } 487 }
488 488
489 if (info->port.tty) 489 if (info->tty)
490 set_bit(TTY_IO_ERROR, &info->port.tty->flags); 490 set_bit(TTY_IO_ERROR, &info->tty->flags);
491 491
492 info->flags &= ~S_INITIALIZED; 492 info->flags &= ~S_INITIALIZED;
493 local_irq_restore(flags); 493 local_irq_restore(flags);
@@ -553,9 +553,9 @@ static void change_speed(struct m68k_serial *info)
553 unsigned cflag; 553 unsigned cflag;
554 int i; 554 int i;
555 555
556 if (!info->port.tty || !info->port.tty->termios) 556 if (!info->tty || !info->tty->termios)
557 return; 557 return;
558 cflag = info->port.tty->termios->c_cflag; 558 cflag = info->tty->termios->c_cflag;
559 if (!(port = info->port)) 559 if (!(port = info->port))
560 return; 560 return;
561 561
@@ -970,7 +970,6 @@ static void send_break(struct m68k_serial * info, unsigned int duration)
970static int rs_ioctl(struct tty_struct *tty, struct file * file, 970static int rs_ioctl(struct tty_struct *tty, struct file * file,
971 unsigned int cmd, unsigned long arg) 971 unsigned int cmd, unsigned long arg)
972{ 972{
973 int error;
974 struct m68k_serial * info = (struct m68k_serial *)tty->driver_data; 973 struct m68k_serial * info = (struct m68k_serial *)tty->driver_data;
975 int retval; 974 int retval;
976 975
@@ -1104,7 +1103,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
1104 tty_ldisc_flush(tty); 1103 tty_ldisc_flush(tty);
1105 tty->closing = 0; 1104 tty->closing = 0;
1106 info->event = 0; 1105 info->event = 0;
1107 info->port.tty = NULL; 1106 info->tty = NULL;
1108#warning "This is not and has never been valid so fix it" 1107#warning "This is not and has never been valid so fix it"
1109#if 0 1108#if 0
1110 if (tty->ldisc.num != ldiscs[N_TTY].num) { 1109 if (tty->ldisc.num != ldiscs[N_TTY].num) {
@@ -1142,7 +1141,7 @@ void rs_hangup(struct tty_struct *tty)
1142 info->event = 0; 1141 info->event = 0;
1143 info->count = 0; 1142 info->count = 0;
1144 info->flags &= ~S_NORMAL_ACTIVE; 1143 info->flags &= ~S_NORMAL_ACTIVE;
1145 info->port.tty = NULL; 1144 info->tty = NULL;
1146 wake_up_interruptible(&info->open_wait); 1145 wake_up_interruptible(&info->open_wait);
1147} 1146}
1148 1147
@@ -1261,7 +1260,7 @@ int rs_open(struct tty_struct *tty, struct file * filp)
1261 1260
1262 info->count++; 1261 info->count++;
1263 tty->driver_data = info; 1262 tty->driver_data = info;
1264 info->port.tty = tty; 1263 info->tty = tty;
1265 1264
1266 /* 1265 /*
1267 * Start up serial port 1266 * Start up serial port
@@ -1338,7 +1337,7 @@ rs68328_init(void)
1338 info = &m68k_soft[i]; 1337 info = &m68k_soft[i];
1339 info->magic = SERIAL_MAGIC; 1338 info->magic = SERIAL_MAGIC;
1340 info->port = (int) &uart_addr[i]; 1339 info->port = (int) &uart_addr[i];
1341 info->port.tty = NULL; 1340 info->tty = NULL;
1342 info->irq = uart_irqs[i]; 1341 info->irq = uart_irqs[i];
1343 info->custom_divisor = 16; 1342 info->custom_divisor = 16;
1344 info->close_delay = 50; 1343 info->close_delay = 50;
diff --git a/drivers/tty/serial/68360serial.c b/drivers/tty/serial/68360serial.c
index 88b13356ec10..bc21eeae8fde 100644
--- a/drivers/tty/serial/68360serial.c
+++ b/drivers/tty/serial/68360serial.c
@@ -2428,6 +2428,7 @@ static const struct tty_operations rs_360_ops = {
2428 /* .read_proc = rs_360_read_proc, */ 2428 /* .read_proc = rs_360_read_proc, */
2429 .tiocmget = rs_360_tiocmget, 2429 .tiocmget = rs_360_tiocmget,
2430 .tiocmset = rs_360_tiocmset, 2430 .tiocmset = rs_360_tiocmset,
2431 .get_icount = rs_360_get_icount,
2431}; 2432};
2432 2433
2433static int __init rs_360_init(void) 2434static int __init rs_360_init(void)
diff --git a/drivers/tty/serial/bfin_5xx.c b/drivers/tty/serial/bfin_5xx.c
index e381b895b04d..9b1ff2b6bb37 100644
--- a/drivers/tty/serial/bfin_5xx.c
+++ b/drivers/tty/serial/bfin_5xx.c
@@ -370,10 +370,8 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
370{ 370{
371 struct bfin_serial_port *uart = dev_id; 371 struct bfin_serial_port *uart = dev_id;
372 372
373 spin_lock(&uart->port.lock);
374 while (UART_GET_LSR(uart) & DR) 373 while (UART_GET_LSR(uart) & DR)
375 bfin_serial_rx_chars(uart); 374 bfin_serial_rx_chars(uart);
376 spin_unlock(&uart->port.lock);
377 375
378 return IRQ_HANDLED; 376 return IRQ_HANDLED;
379} 377}
@@ -490,9 +488,8 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
490{ 488{
491 int x_pos, pos; 489 int x_pos, pos;
492 490
493 dma_disable_irq(uart->tx_dma_channel); 491 dma_disable_irq_nosync(uart->rx_dma_channel);
494 dma_disable_irq(uart->rx_dma_channel); 492 spin_lock_bh(&uart->rx_lock);
495 spin_lock_bh(&uart->port.lock);
496 493
497 /* 2D DMA RX buffer ring is used. Because curr_y_count and 494 /* 2D DMA RX buffer ring is used. Because curr_y_count and
498 * curr_x_count can't be read as an atomic operation, 495 * curr_x_count can't be read as an atomic operation,
@@ -523,8 +520,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
523 uart->rx_dma_buf.tail = uart->rx_dma_buf.head; 520 uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
524 } 521 }
525 522
526 spin_unlock_bh(&uart->port.lock); 523 spin_unlock_bh(&uart->rx_lock);
527 dma_enable_irq(uart->tx_dma_channel);
528 dma_enable_irq(uart->rx_dma_channel); 524 dma_enable_irq(uart->rx_dma_channel);
529 525
530 mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES); 526 mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
@@ -571,7 +567,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
571 unsigned short irqstat; 567 unsigned short irqstat;
572 int x_pos, pos; 568 int x_pos, pos;
573 569
574 spin_lock(&uart->port.lock); 570 spin_lock(&uart->rx_lock);
575 irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); 571 irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
576 clear_dma_irqstat(uart->rx_dma_channel); 572 clear_dma_irqstat(uart->rx_dma_channel);
577 573
@@ -589,7 +585,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
589 uart->rx_dma_buf.tail = uart->rx_dma_buf.head; 585 uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
590 } 586 }
591 587
592 spin_unlock(&uart->port.lock); 588 spin_unlock(&uart->rx_lock);
593 589
594 return IRQ_HANDLED; 590 return IRQ_HANDLED;
595} 591}
@@ -1332,6 +1328,7 @@ static int bfin_serial_probe(struct platform_device *pdev)
1332 } 1328 }
1333 1329
1334#ifdef CONFIG_SERIAL_BFIN_DMA 1330#ifdef CONFIG_SERIAL_BFIN_DMA
1331 spin_lock_init(&uart->rx_lock);
1335 uart->tx_done = 1; 1332 uart->tx_done = 1;
1336 uart->tx_count = 0; 1333 uart->tx_count = 0;
1337 1334
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index beb1afa27d8d..7b951adac54b 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port)
601 s->rts = 0; 601 s->rts = 0;
602 602
603 sprintf(b, "max3100-%d", s->minor); 603 sprintf(b, "max3100-%d", s->minor);
604 s->workqueue = create_freezeable_workqueue(b); 604 s->workqueue = create_freezable_workqueue(b);
605 if (!s->workqueue) { 605 if (!s->workqueue) {
606 dev_warn(&s->spi->dev, "cannot create workqueue\n"); 606 dev_warn(&s->spi->dev, "cannot create workqueue\n");
607 return -EBUSY; 607 return -EBUSY;
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 910870edf708..750b4f627315 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port)
833 struct max3107_port *s = container_of(port, struct max3107_port, port); 833 struct max3107_port *s = container_of(port, struct max3107_port, port);
834 834
835 /* Initialize work queue */ 835 /* Initialize work queue */
836 s->workqueue = create_freezeable_workqueue("max3107"); 836 s->workqueue = create_freezable_workqueue("max3107");
837 if (!s->workqueue) { 837 if (!s->workqueue) {
838 dev_err(&s->spi->dev, "Workqueue creation failed\n"); 838 dev_err(&s->spi->dev, "Workqueue creation failed\n");
839 return -EBUSY; 839 return -EBUSY;
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 8e0dd254eb11..81f13958e751 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -571,6 +571,7 @@ struct sysrq_state {
571 unsigned int alt_use; 571 unsigned int alt_use;
572 bool active; 572 bool active;
573 bool need_reinject; 573 bool need_reinject;
574 bool reinjecting;
574}; 575};
575 576
576static void sysrq_reinject_alt_sysrq(struct work_struct *work) 577static void sysrq_reinject_alt_sysrq(struct work_struct *work)
@@ -581,6 +582,10 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
581 unsigned int alt_code = sysrq->alt_use; 582 unsigned int alt_code = sysrq->alt_use;
582 583
583 if (sysrq->need_reinject) { 584 if (sysrq->need_reinject) {
585 /* we do not want the assignment to be reordered */
586 sysrq->reinjecting = true;
587 mb();
588
584 /* Simulate press and release of Alt + SysRq */ 589 /* Simulate press and release of Alt + SysRq */
585 input_inject_event(handle, EV_KEY, alt_code, 1); 590 input_inject_event(handle, EV_KEY, alt_code, 1);
586 input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1); 591 input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1);
@@ -589,6 +594,9 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
589 input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0); 594 input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0);
590 input_inject_event(handle, EV_KEY, alt_code, 0); 595 input_inject_event(handle, EV_KEY, alt_code, 0);
591 input_inject_event(handle, EV_SYN, SYN_REPORT, 1); 596 input_inject_event(handle, EV_SYN, SYN_REPORT, 1);
597
598 mb();
599 sysrq->reinjecting = false;
592 } 600 }
593} 601}
594 602
@@ -599,6 +607,13 @@ static bool sysrq_filter(struct input_handle *handle,
599 bool was_active = sysrq->active; 607 bool was_active = sysrq->active;
600 bool suppress; 608 bool suppress;
601 609
610 /*
611 * Do not filter anything if we are in the process of re-injecting
612 * Alt+SysRq combination.
613 */
614 if (sysrq->reinjecting)
615 return false;
616
602 switch (type) { 617 switch (type) {
603 618
604 case EV_SYN: 619 case EV_SYN:
@@ -629,7 +644,7 @@ static bool sysrq_filter(struct input_handle *handle,
629 sysrq->alt_use = sysrq->alt; 644 sysrq->alt_use = sysrq->alt;
630 /* 645 /*
631 * If nothing else will be pressed we'll need 646 * If nothing else will be pressed we'll need
632 * to * re-inject Alt-SysRq keysroke. 647 * to re-inject Alt-SysRq keysroke.
633 */ 648 */
634 sysrq->need_reinject = true; 649 sysrq->need_reinject = true;
635 } 650 }
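The sysrq change above adds a reinjecting flag so that sysrq_filter() does not act on the synthetic Alt+SysRq events the work handler itself injects: the handler sets the flag before injecting and clears it afterwards, with mb() keeping the flag stores ordered against the injected events, and the filter passes everything through while the flag is set. The pattern in isolation, with hypothetical names:

	/* injecting side (workqueue handler) */
	state->busy = true;
	mb();			/* flag visible before the injected events */
	inject_synthetic_events(state);
	mb();			/* injected events done before clearing the flag */
	state->busy = false;

	/* filter side */
	if (state->busy)
		return false;	/* never filter our own re-injected events */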
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index d6ede989ff22..4ab49d4eebf4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1607,6 +1607,7 @@ static const struct usb_device_id acm_ids[] = {
1607 { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */ 1607 { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
1608 { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */ 1608 { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
1609 { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */ 1609 { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
1610 { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
1610 { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */ 1611 { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
1611 1612
1612 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ 1613 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 6a95017fa62b..e935f71d7a34 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1955,7 +1955,6 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
1955 1955
1956 dev_dbg(&rhdev->dev, "usb %s%s\n", 1956 dev_dbg(&rhdev->dev, "usb %s%s\n",
1957 (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume"); 1957 (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
1958 clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
1959 if (!hcd->driver->bus_resume) 1958 if (!hcd->driver->bus_resume)
1960 return -ENOENT; 1959 return -ENOENT;
1961 if (hcd->state == HC_STATE_RUNNING) 1960 if (hcd->state == HC_STATE_RUNNING)
@@ -1963,6 +1962,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
1963 1962
1964 hcd->state = HC_STATE_RESUMING; 1963 hcd->state = HC_STATE_RESUMING;
1965 status = hcd->driver->bus_resume(hcd); 1964 status = hcd->driver->bus_resume(hcd);
1965 clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
1966 if (status == 0) { 1966 if (status == 0) {
1967 /* TRSMRCY = 10 msec */ 1967 /* TRSMRCY = 10 msec */
1968 msleep(10); 1968 msleep(10);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4310cc4b1cb5..d041c6826e43 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2753,6 +2753,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2753 udev->ttport = hdev->ttport; 2753 udev->ttport = hdev->ttport;
2754 } else if (udev->speed != USB_SPEED_HIGH 2754 } else if (udev->speed != USB_SPEED_HIGH
2755 && hdev->speed == USB_SPEED_HIGH) { 2755 && hdev->speed == USB_SPEED_HIGH) {
2756 if (!hub->tt.hub) {
2757 dev_err(&udev->dev, "parent hub has no TT\n");
2758 retval = -EINVAL;
2759 goto fail;
2760 }
2756 udev->tt = &hub->tt; 2761 udev->tt = &hub->tt;
2757 udev->ttport = port1; 2762 udev->ttport = port1;
2758 } 2763 }
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 06bb9d4587e9..d50099675f28 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -546,6 +546,8 @@ config USB_GADGET_CI13XXX_MSM
546 ci13xxx_udc core. 546 ci13xxx_udc core.
547 This driver depends on OTG driver for PHY initialization, 547 This driver depends on OTG driver for PHY initialization,
548 clock management, powering up VBUS, and power management. 548 clock management, powering up VBUS, and power management.
549 This driver is not supported on boards like trout which
550 has an external PHY.
549 551
550 Say "y" to link the driver statically, or "m" to build a 552 Say "y" to link the driver statically, or "m" to build a
551 dynamically linked module called "ci13xxx_msm" and force all 553 dynamically linked module called "ci13xxx_msm" and force all
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index b5dbb2308f56..6d8e533949eb 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -293,6 +293,7 @@
293 293
294#include <linux/usb/ch9.h> 294#include <linux/usb/ch9.h>
295#include <linux/usb/gadget.h> 295#include <linux/usb/gadget.h>
296#include <linux/usb/composite.h>
296 297
297#include "gadget_chips.h" 298#include "gadget_chips.h"
298 299
@@ -2763,7 +2764,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
2763 return ERR_PTR(-ENOMEM); 2764 return ERR_PTR(-ENOMEM);
2764 common->free_storage_on_release = 1; 2765 common->free_storage_on_release = 1;
2765 } else { 2766 } else {
2766 memset(common, 0, sizeof common); 2767 memset(common, 0, sizeof *common);
2767 common->free_storage_on_release = 0; 2768 common->free_storage_on_release = 0;
2768 } 2769 }
2769 2770
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 20d43da319ae..015118535f77 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -258,7 +258,7 @@ static int pipe_buffer_setting(struct r8a66597 *r8a66597,
258 break; 258 break;
259 case R8A66597_BULK: 259 case R8A66597_BULK:
260 /* isochronous pipes may be used as bulk pipes */ 260 /* isochronous pipes may be used as bulk pipes */
261 if (info->pipe > R8A66597_BASE_PIPENUM_BULK) 261 if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
262 bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK; 262 bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
263 else 263 else
264 bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC; 264 bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 24046c0f5878..0e6afa260ed8 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -151,6 +151,8 @@ config USB_EHCI_MSM
151 Qualcomm chipsets. Root Hub has inbuilt TT. 151 Qualcomm chipsets. Root Hub has inbuilt TT.
152 This driver depends on OTG driver for PHY initialization, 152 This driver depends on OTG driver for PHY initialization,
153 clock management, powering up VBUS, and power management. 153 clock management, powering up VBUS, and power management.
154 This driver is not supported on boards like trout which
155 has an external PHY.
154 156
155config USB_EHCI_HCD_PPC_OF 157config USB_EHCI_HCD_PPC_OF
156 bool "EHCI support for PPC USB controller on OF platform bus" 158 bool "EHCI support for PPC USB controller on OF platform bus"
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 2baf8a849086..a869e3c103d3 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -227,8 +227,8 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
227 * mark HW unaccessible. The PM and USB cores make sure that 227 * mark HW unaccessible. The PM and USB cores make sure that
228 * the root hub is either suspended or stopped. 228 * the root hub is either suspended or stopped.
229 */ 229 */
230 spin_lock_irqsave(&ehci->lock, flags);
231 ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev)); 230 ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
231 spin_lock_irqsave(&ehci->lock, flags);
232 ehci_writel(ehci, 0, &ehci->regs->intr_enable); 232 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
233 (void)ehci_readl(ehci, &ehci->regs->intr_enable); 233 (void)ehci_readl(ehci, &ehci->regs->intr_enable);
234 234
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 796ea0c8900f..8a515f0d5988 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -111,6 +111,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
111{ 111{
112 int port; 112 int port;
113 u32 temp; 113 u32 temp;
114 unsigned long flags;
114 115
115 /* If remote wakeup is enabled for the root hub but disabled 116 /* If remote wakeup is enabled for the root hub but disabled
116 * for the controller, we must adjust all the port wakeup flags 117 * for the controller, we must adjust all the port wakeup flags
@@ -120,6 +121,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
120 if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup) 121 if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
121 return; 122 return;
122 123
124 spin_lock_irqsave(&ehci->lock, flags);
125
123 /* clear phy low-power mode before changing wakeup flags */ 126 /* clear phy low-power mode before changing wakeup flags */
124 if (ehci->has_hostpc) { 127 if (ehci->has_hostpc) {
125 port = HCS_N_PORTS(ehci->hcs_params); 128 port = HCS_N_PORTS(ehci->hcs_params);
@@ -131,7 +134,9 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
131 temp = ehci_readl(ehci, hostpc_reg); 134 temp = ehci_readl(ehci, hostpc_reg);
132 ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg); 135 ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
133 } 136 }
137 spin_unlock_irqrestore(&ehci->lock, flags);
134 msleep(5); 138 msleep(5);
139 spin_lock_irqsave(&ehci->lock, flags);
135 } 140 }
136 141
137 port = HCS_N_PORTS(ehci->hcs_params); 142 port = HCS_N_PORTS(ehci->hcs_params);
@@ -170,6 +175,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
170 /* Does the root hub have a port wakeup pending? */ 175 /* Does the root hub have a port wakeup pending? */
171 if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)) 176 if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD))
172 usb_hcd_resume_root_hub(ehci_to_hcd(ehci)); 177 usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
178
179 spin_unlock_irqrestore(&ehci->lock, flags);
173} 180}
174 181
175static int ehci_bus_suspend (struct usb_hcd *hcd) 182static int ehci_bus_suspend (struct usb_hcd *hcd)
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 680f2ef4e59f..f784ceb862a3 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -796,7 +796,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
796 hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev, 796 hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
797 dev_name(&pdev->dev)); 797 dev_name(&pdev->dev));
798 if (!hcd) { 798 if (!hcd) {
799 dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret); 799 dev_err(&pdev->dev, "failed to create hcd with err %d\n", ret);
800 ret = -ENOMEM; 800 ret = -ENOMEM;
801 goto err_create_hcd; 801 goto err_create_hcd;
802 } 802 }
@@ -864,7 +864,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
864 864
865 ret = omap_start_ehc(omap, hcd); 865 ret = omap_start_ehc(omap, hcd);
866 if (ret) { 866 if (ret) {
867 dev_dbg(&pdev->dev, "failed to start ehci\n"); 867 dev_err(&pdev->dev, "failed to start ehci with err %d\n", ret);
868 goto err_start; 868 goto err_start;
869 } 869 }
870 870
@@ -879,7 +879,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
879 879
880 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); 880 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
881 if (ret) { 881 if (ret) {
882 dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret); 882 dev_err(&pdev->dev, "failed to add hcd with err %d\n", ret);
883 goto err_add_hcd; 883 goto err_add_hcd;
884 } 884 }
885 885
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index bed07d4aab06..07bb982e59f6 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -367,8 +367,8 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
367 * mark HW unaccessible. The PM and USB cores make sure that 367 * mark HW unaccessible. The PM and USB cores make sure that
368 * the root hub is either suspended or stopped. 368 * the root hub is either suspended or stopped.
369 */ 369 */
370 spin_lock_irqsave (&ehci->lock, flags);
371 ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup); 370 ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
371 spin_lock_irqsave (&ehci->lock, flags);
372 ehci_writel(ehci, 0, &ehci->regs->intr_enable); 372 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
373 (void)ehci_readl(ehci, &ehci->regs->intr_enable); 373 (void)ehci_readl(ehci, &ehci->regs->intr_enable);
374 374
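Both EHCI bus-glue suspend hunks (ehci-au1xxx above and ehci-pci here) move ehci_prepare_ports_for_controller_suspend() out of the ehci->lock region, since the ehci-hub.c hunk earlier makes ehci_adjust_port_wakeup_flags(), the helper that backs that wrapper, take ehci->lock itself and drop it around the msleep(5) in the HOSTPC path; calling it with the lock already held would deadlock. The resulting ordering in the suspend paths, restated for clarity:

	/* prepare port wakeup flags first; this now locks ehci->lock internally */
	ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);

	spin_lock_irqsave(&ehci->lock, flags);
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
	(void)ehci_readl(ehci, &ehci->regs->intr_enable);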
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 990f06b89eaa..2e9602a10e9b 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -861,6 +861,7 @@ static int sl811h_urb_enqueue(
861 DBG("dev %d ep%d maxpacket %d\n", 861 DBG("dev %d ep%d maxpacket %d\n",
862 udev->devnum, epnum, ep->maxpacket); 862 udev->devnum, epnum, ep->maxpacket);
863 retval = -EINVAL; 863 retval = -EINVAL;
864 kfree(ep);
864 goto fail; 865 goto fail;
865 } 866 }
866 867
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index eeba228eb2af..9d49d1cd7ce2 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -404,6 +404,7 @@ static int bfin_musb_init(struct musb *musb)
404 musb->xceiv->set_power = bfin_musb_set_power; 404 musb->xceiv->set_power = bfin_musb_set_power;
405 405
406 musb->isr = blackfin_interrupt; 406 musb->isr = blackfin_interrupt;
407 musb->double_buffer_not_ok = true;
407 408
408 return 0; 409 return 0;
409} 410}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 07cf394e491b..54a8bd1047d6 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -128,12 +128,7 @@ MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
128 128
129static inline struct musb *dev_to_musb(struct device *dev) 129static inline struct musb *dev_to_musb(struct device *dev)
130{ 130{
131#ifdef CONFIG_USB_MUSB_HDRC_HCD
132 /* usbcore insists dev->driver_data is a "struct hcd *" */
133 return hcd_to_musb(dev_get_drvdata(dev));
134#else
135 return dev_get_drvdata(dev); 131 return dev_get_drvdata(dev);
136#endif
137} 132}
138 133
139/*-------------------------------------------------------------------------*/ 134/*-------------------------------------------------------------------------*/
@@ -1876,10 +1871,9 @@ allocate_instance(struct device *dev,
1876 musb = kzalloc(sizeof *musb, GFP_KERNEL); 1871 musb = kzalloc(sizeof *musb, GFP_KERNEL);
1877 if (!musb) 1872 if (!musb)
1878 return NULL; 1873 return NULL;
1879 dev_set_drvdata(dev, musb);
1880 1874
1881#endif 1875#endif
1882 1876 dev_set_drvdata(dev, musb);
1883 musb->mregs = mbase; 1877 musb->mregs = mbase;
1884 musb->ctrl_base = mbase; 1878 musb->ctrl_base = mbase;
1885 musb->nIrq = -ENODEV; 1879 musb->nIrq = -ENODEV;
@@ -2191,7 +2185,7 @@ static int __init musb_probe(struct platform_device *pdev)
2191 void __iomem *base; 2185 void __iomem *base;
2192 2186
2193 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2187 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2194 if (!iomem || irq == 0) 2188 if (!iomem || irq <= 0)
2195 return -ENODEV; 2189 return -ENODEV;
2196 2190
2197 base = ioremap(iomem->start, resource_size(iomem)); 2191 base = ioremap(iomem->start, resource_size(iomem));
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index d0c236f8e191..d74a8113ae74 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -488,6 +488,18 @@ struct musb {
488 unsigned set_address:1; 488 unsigned set_address:1;
489 unsigned test_mode:1; 489 unsigned test_mode:1;
490 unsigned softconnect:1; 490 unsigned softconnect:1;
491 /*
492 * FIXME: Remove this flag.
493 *
494 * This is only added to allow Blackfin to work
 495	 * with the current driver. For some unknown reason
 496	 * Blackfin doesn't work with double buffering,
 497	 * which is enabled by default.
498 *
499 * We added this flag to forcefully disable double
500 * buffering until we get it working.
501 */
502 unsigned double_buffer_not_ok:1 __deprecated;
491 503
492 u8 address; 504 u8 address;
493 u8 test_mode_nr; 505 u8 test_mode_nr;
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 916065ba9e70..3a97c4e2d4f5 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -169,6 +169,9 @@ struct dma_controller {
169 dma_addr_t dma_addr, 169 dma_addr_t dma_addr,
170 u32 length); 170 u32 length);
171 int (*channel_abort)(struct dma_channel *); 171 int (*channel_abort)(struct dma_channel *);
172 int (*is_compatible)(struct dma_channel *channel,
173 u16 maxpacket,
174 void *buf, u32 length);
172}; 175};
173 176
174/* called after channel_program(), may indicate a fault */ 177/* called after channel_program(), may indicate a fault */
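The new is_compatible() member added to struct dma_controller above is an optional hook: a DMA engine can veto requests it cannot handle before the gadget code maps the buffer, and when the hook is absent the request is mapped as before. A rough stand-alone sketch of that optional-callback pattern (dma_ops, small_only and can_use_dma are illustrative names, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct dma_ops {
	/* Optional: return false to reject a transfer the engine can't do. */
	bool (*is_compatible)(unsigned maxpacket, const void *buf, unsigned len);
};

static bool small_only(unsigned maxpacket, const void *buf, unsigned len)
{
	(void)buf;
	return len <= 4 * maxpacket;	/* toy constraint for the demo */
}

/* Default behaviour is to accept (and map) when no hook is provided. */
static bool can_use_dma(const struct dma_ops *ops, unsigned maxpacket,
			const void *buf, unsigned len)
{
	if (ops->is_compatible)
		return ops->is_compatible(maxpacket, buf, len);
	return true;
}

int main(void)
{
	struct dma_ops plain = { 0 };
	struct dma_ops picky = { .is_compatible = small_only };
	char buf[1024];

	printf("%d %d\n", can_use_dma(&plain, 64, buf, sizeof(buf)),
			  can_use_dma(&picky, 64, buf, sizeof(buf)));
	return 0;
}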
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ed58c6c8f15c..2fe304611dcf 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,11 +92,33 @@
92 92
93/* ----------------------------------------------------------------------- */ 93/* ----------------------------------------------------------------------- */
94 94
95#define is_buffer_mapped(req) (is_dma_capable() && \
96 (req->map_state != UN_MAPPED))
97
95/* Maps the buffer to dma */ 98/* Maps the buffer to dma */
96 99
97static inline void map_dma_buffer(struct musb_request *request, 100static inline void map_dma_buffer(struct musb_request *request,
98 struct musb *musb) 101 struct musb *musb, struct musb_ep *musb_ep)
99{ 102{
103 int compatible = true;
104 struct dma_controller *dma = musb->dma_controller;
105
106 request->map_state = UN_MAPPED;
107
108 if (!is_dma_capable() || !musb_ep->dma)
109 return;
110
111 /* Check if DMA engine can handle this request.
112 * DMA code must reject the USB request explicitly.
113 * Default behaviour is to map the request.
114 */
115 if (dma->is_compatible)
116 compatible = dma->is_compatible(musb_ep->dma,
117 musb_ep->packet_sz, request->request.buf,
118 request->request.length);
119 if (!compatible)
120 return;
121
100 if (request->request.dma == DMA_ADDR_INVALID) { 122 if (request->request.dma == DMA_ADDR_INVALID) {
101 request->request.dma = dma_map_single( 123 request->request.dma = dma_map_single(
102 musb->controller, 124 musb->controller,
@@ -105,7 +127,7 @@ static inline void map_dma_buffer(struct musb_request *request,
105 request->tx 127 request->tx
106 ? DMA_TO_DEVICE 128 ? DMA_TO_DEVICE
107 : DMA_FROM_DEVICE); 129 : DMA_FROM_DEVICE);
108 request->mapped = 1; 130 request->map_state = MUSB_MAPPED;
109 } else { 131 } else {
110 dma_sync_single_for_device(musb->controller, 132 dma_sync_single_for_device(musb->controller,
111 request->request.dma, 133 request->request.dma,
@@ -113,7 +135,7 @@ static inline void map_dma_buffer(struct musb_request *request,
113 request->tx 135 request->tx
114 ? DMA_TO_DEVICE 136 ? DMA_TO_DEVICE
115 : DMA_FROM_DEVICE); 137 : DMA_FROM_DEVICE);
116 request->mapped = 0; 138 request->map_state = PRE_MAPPED;
117 } 139 }
118} 140}
119 141
@@ -121,11 +143,14 @@ static inline void map_dma_buffer(struct musb_request *request,
121static inline void unmap_dma_buffer(struct musb_request *request, 143static inline void unmap_dma_buffer(struct musb_request *request,
122 struct musb *musb) 144 struct musb *musb)
123{ 145{
146 if (!is_buffer_mapped(request))
147 return;
148
124 if (request->request.dma == DMA_ADDR_INVALID) { 149 if (request->request.dma == DMA_ADDR_INVALID) {
125 DBG(20, "not unmapping a never mapped buffer\n"); 150 DBG(20, "not unmapping a never mapped buffer\n");
126 return; 151 return;
127 } 152 }
128 if (request->mapped) { 153 if (request->map_state == MUSB_MAPPED) {
129 dma_unmap_single(musb->controller, 154 dma_unmap_single(musb->controller,
130 request->request.dma, 155 request->request.dma,
131 request->request.length, 156 request->request.length,
@@ -133,16 +158,15 @@ static inline void unmap_dma_buffer(struct musb_request *request,
133 ? DMA_TO_DEVICE 158 ? DMA_TO_DEVICE
134 : DMA_FROM_DEVICE); 159 : DMA_FROM_DEVICE);
135 request->request.dma = DMA_ADDR_INVALID; 160 request->request.dma = DMA_ADDR_INVALID;
136 request->mapped = 0; 161 } else { /* PRE_MAPPED */
137 } else {
138 dma_sync_single_for_cpu(musb->controller, 162 dma_sync_single_for_cpu(musb->controller,
139 request->request.dma, 163 request->request.dma,
140 request->request.length, 164 request->request.length,
141 request->tx 165 request->tx
142 ? DMA_TO_DEVICE 166 ? DMA_TO_DEVICE
143 : DMA_FROM_DEVICE); 167 : DMA_FROM_DEVICE);
144
145 } 168 }
169 request->map_state = UN_MAPPED;
146} 170}
147 171
148/* 172/*
@@ -172,8 +196,7 @@ __acquires(ep->musb->lock)
172 196
173 ep->busy = 1; 197 ep->busy = 1;
174 spin_unlock(&musb->lock); 198 spin_unlock(&musb->lock);
175 if (is_dma_capable() && ep->dma) 199 unmap_dma_buffer(req, musb);
176 unmap_dma_buffer(req, musb);
177 if (request->status == 0) 200 if (request->status == 0)
178 DBG(5, "%s done request %p, %d/%d\n", 201 DBG(5, "%s done request %p, %d/%d\n",
179 ep->end_point.name, request, 202 ep->end_point.name, request,
@@ -335,7 +358,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
335 csr); 358 csr);
336 359
337#ifndef CONFIG_MUSB_PIO_ONLY 360#ifndef CONFIG_MUSB_PIO_ONLY
338 if (is_dma_capable() && musb_ep->dma) { 361 if (is_buffer_mapped(req)) {
339 struct dma_controller *c = musb->dma_controller; 362 struct dma_controller *c = musb->dma_controller;
340 size_t request_size; 363 size_t request_size;
341 364
@@ -436,8 +459,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
436 * Unmap the dma buffer back to cpu if dma channel 459 * Unmap the dma buffer back to cpu if dma channel
437 * programming fails 460 * programming fails
438 */ 461 */
439 if (is_dma_capable() && musb_ep->dma) 462 unmap_dma_buffer(req, musb);
440 unmap_dma_buffer(req, musb);
441 463
442 musb_write_fifo(musb_ep->hw_ep, fifo_count, 464 musb_write_fifo(musb_ep->hw_ep, fifo_count,
443 (u8 *) (request->buf + request->actual)); 465 (u8 *) (request->buf + request->actual));
@@ -627,7 +649,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
627 return; 649 return;
628 } 650 }
629 651
630 if (is_cppi_enabled() && musb_ep->dma) { 652 if (is_cppi_enabled() && is_buffer_mapped(req)) {
631 struct dma_controller *c = musb->dma_controller; 653 struct dma_controller *c = musb->dma_controller;
632 struct dma_channel *channel = musb_ep->dma; 654 struct dma_channel *channel = musb_ep->dma;
633 655
@@ -658,7 +680,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
658 len = musb_readw(epio, MUSB_RXCOUNT); 680 len = musb_readw(epio, MUSB_RXCOUNT);
659 if (request->actual < request->length) { 681 if (request->actual < request->length) {
660#ifdef CONFIG_USB_INVENTRA_DMA 682#ifdef CONFIG_USB_INVENTRA_DMA
661 if (is_dma_capable() && musb_ep->dma) { 683 if (is_buffer_mapped(req)) {
662 struct dma_controller *c; 684 struct dma_controller *c;
663 struct dma_channel *channel; 685 struct dma_channel *channel;
664 int use_dma = 0; 686 int use_dma = 0;
@@ -742,7 +764,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
742 fifo_count = min_t(unsigned, len, fifo_count); 764 fifo_count = min_t(unsigned, len, fifo_count);
743 765
744#ifdef CONFIG_USB_TUSB_OMAP_DMA 766#ifdef CONFIG_USB_TUSB_OMAP_DMA
745 if (tusb_dma_omap() && musb_ep->dma) { 767 if (tusb_dma_omap() && is_buffer_mapped(req)) {
746 struct dma_controller *c = musb->dma_controller; 768 struct dma_controller *c = musb->dma_controller;
747 struct dma_channel *channel = musb_ep->dma; 769 struct dma_channel *channel = musb_ep->dma;
748 u32 dma_addr = request->dma + request->actual; 770 u32 dma_addr = request->dma + request->actual;
@@ -762,7 +784,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
762 * programming fails. This buffer is mapped if the 784 * programming fails. This buffer is mapped if the
763 * channel allocation is successful 785 * channel allocation is successful
764 */ 786 */
765 if (is_dma_capable() && musb_ep->dma) { 787 if (is_buffer_mapped(req)) {
766 unmap_dma_buffer(req, musb); 788 unmap_dma_buffer(req, musb);
767 789
768 /* 790 /*
@@ -989,7 +1011,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
989 /* Set TXMAXP with the FIFO size of the endpoint 1011 /* Set TXMAXP with the FIFO size of the endpoint
990 * to disable double buffering mode. 1012 * to disable double buffering mode.
991 */ 1013 */
992 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); 1014 if (musb->double_buffer_not_ok)
1015 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
1016 else
1017 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
1018 | (musb_ep->hb_mult << 11));
993 1019
994 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 1020 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
995 if (musb_readw(regs, MUSB_TXCSR) 1021 if (musb_readw(regs, MUSB_TXCSR)
@@ -1025,7 +1051,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
1025 /* Set RXMAXP with the FIFO size of the endpoint 1051 /* Set RXMAXP with the FIFO size of the endpoint
1026 * to disable double buffering mode. 1052 * to disable double buffering mode.
1027 */ 1053 */
1028 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); 1054 if (musb->double_buffer_not_ok)
1055 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
1056 else
1057 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
1058 | (musb_ep->hb_mult << 11));
1029 1059
1030 /* force shared fifo to OUT-only mode */ 1060 /* force shared fifo to OUT-only mode */
1031 if (hw_ep->is_shared_fifo) { 1061 if (hw_ep->is_shared_fifo) {
@@ -1214,10 +1244,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1214 request->epnum = musb_ep->current_epnum; 1244 request->epnum = musb_ep->current_epnum;
1215 request->tx = musb_ep->is_in; 1245 request->tx = musb_ep->is_in;
1216 1246
1217 if (is_dma_capable() && musb_ep->dma) 1247 map_dma_buffer(request, musb, musb_ep);
1218 map_dma_buffer(request, musb);
1219 else
1220 request->mapped = 0;
1221 1248
1222 spin_lock_irqsave(&musb->lock, lockflags); 1249 spin_lock_irqsave(&musb->lock, lockflags);
1223 1250
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index dec8dc008191..a55354fbccf5 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -35,13 +35,19 @@
35#ifndef __MUSB_GADGET_H 35#ifndef __MUSB_GADGET_H
36#define __MUSB_GADGET_H 36#define __MUSB_GADGET_H
37 37
38enum buffer_map_state {
39 UN_MAPPED = 0,
40 PRE_MAPPED,
41 MUSB_MAPPED
42};
43
38struct musb_request { 44struct musb_request {
39 struct usb_request request; 45 struct usb_request request;
40 struct musb_ep *ep; 46 struct musb_ep *ep;
41 struct musb *musb; 47 struct musb *musb;
42 u8 tx; /* endpoint direction */ 48 u8 tx; /* endpoint direction */
43 u8 epnum; 49 u8 epnum;
44 u8 mapped; 50 enum buffer_map_state map_state;
45}; 51};
46 52
47static inline struct musb_request *to_musb_request(struct usb_request *req) 53static inline struct musb_request *to_musb_request(struct usb_request *req)
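Replacing the old boolean mapped flag with the three-valued map_state above lets unmap_dma_buffer() distinguish buffers this driver mapped itself (MUSB_MAPPED, unmap them), buffers the gadget handed in pre-mapped (PRE_MAPPED, only sync them back to the CPU), and buffers that were never mapped (UN_MAPPED, do nothing). A compact user-space sketch of that state machine with the DMA API calls stubbed out as comments:

#include <stdio.h>

enum buffer_map_state { UN_MAPPED = 0, PRE_MAPPED, MUSB_MAPPED };

struct request {
	int has_dma_addr;		/* stands in for dma != DMA_ADDR_INVALID */
	enum buffer_map_state map_state;
};

static void map_buffer(struct request *req)
{
	req->map_state = UN_MAPPED;
	if (!req->has_dma_addr) {
		/* dma_map_single(...) would go here */
		req->has_dma_addr = 1;
		req->map_state = MUSB_MAPPED;
	} else {
		/* dma_sync_single_for_device(...) would go here */
		req->map_state = PRE_MAPPED;
	}
}

static void unmap_buffer(struct request *req)
{
	if (req->map_state == UN_MAPPED)
		return;			/* never mapped: nothing to undo */
	if (req->map_state == MUSB_MAPPED) {
		/* dma_unmap_single(...) would go here */
		req->has_dma_addr = 0;
	} else {
		/* PRE_MAPPED: dma_sync_single_for_cpu(...) only */
	}
	req->map_state = UN_MAPPED;
}

int main(void)
{
	struct request r = { 0 };

	map_buffer(&r);
	unmap_buffer(&r);
	printf("state=%d dma=%d\n", r.map_state, r.has_dma_addr);
	return 0;
}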
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 4d5bcb4e14d2..0f523d7db57b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -609,7 +609,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
609 /* Set RXMAXP with the FIFO size of the endpoint 609 /* Set RXMAXP with the FIFO size of the endpoint
610 * to disable double buffer mode. 610 * to disable double buffer mode.
611 */ 611 */
612 if (musb->hwvers < MUSB_HWVERS_2000) 612 if (musb->double_buffer_not_ok)
613 musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); 613 musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
614 else 614 else
615 musb_writew(ep->regs, MUSB_RXMAXP, 615 musb_writew(ep->regs, MUSB_RXMAXP,
@@ -784,14 +784,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
784 /* protocol/endpoint/interval/NAKlimit */ 784 /* protocol/endpoint/interval/NAKlimit */
785 if (epnum) { 785 if (epnum) {
786 musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); 786 musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
787 if (can_bulk_split(musb, qh->type)) 787 if (musb->double_buffer_not_ok)
788 musb_writew(epio, MUSB_TXMAXP, 788 musb_writew(epio, MUSB_TXMAXP,
789 packet_sz 789 hw_ep->max_packet_sz_tx);
790 | ((hw_ep->max_packet_sz_tx /
791 packet_sz) - 1) << 11);
792 else 790 else
793 musb_writew(epio, MUSB_TXMAXP, 791 musb_writew(epio, MUSB_TXMAXP,
794 packet_sz); 792 qh->maxpacket |
793 ((qh->hb_mult - 1) << 11));
795 musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); 794 musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
796 } else { 795 } else {
797 musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); 796 musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
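The TXMAXP programming above either falls back to the raw FIFO size (when double buffering must stay off) or composes the endpoint's maxpacket with the high-bandwidth multiplier, i.e. maxpacket | ((hb_mult - 1) << 11), with the multiplier field holding "transactions per microframe minus one". The value computation is easy to check on its own; maxp_reg below is an illustrative helper, not a driver function:

#include <stdio.h>
#include <stdint.h>

/* Sketch: compose a MAXP-style register value from the packet size and the
 * high-bandwidth multiplier (bits 12:11 hold mult - 1), as the non-quirk
 * branch of the hunk above does.
 */
static uint16_t maxp_reg(uint16_t maxpacket, unsigned hb_mult)
{
	return maxpacket | (uint16_t)((hb_mult - 1) << 11);
}

int main(void)
{
	/* 1024-byte packets, 3 transactions per microframe:
	 * 0x0400 | 0x1000 = 0x1400
	 */
	printf("0x%04x\n", maxp_reg(1024, 3));
	return 0;
}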
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index f763d62f151c..21056c924c74 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -94,24 +94,33 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
94{ 94{
95 musb_writew(mbase, 95 musb_writew(mbase,
96 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW), 96 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW),
97 ((u16)((u32) dma_addr & 0xFFFF))); 97 dma_addr);
98 musb_writew(mbase, 98 musb_writew(mbase,
99 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH), 99 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH),
100 ((u16)(((u32) dma_addr >> 16) & 0xFFFF))); 100 (dma_addr >> 16));
101} 101}
102 102
103static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel) 103static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
104{ 104{
105 return musb_readl(mbase, 105 u32 count = musb_readw(mbase,
106 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH)); 106 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
107
108 count = count << 16;
109
110 count |= musb_readw(mbase,
111 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
112
113 return count;
107} 114}
108 115
109static inline void musb_write_hsdma_count(void __iomem *mbase, 116static inline void musb_write_hsdma_count(void __iomem *mbase,
110 u8 bchannel, u32 len) 117 u8 bchannel, u32 len)
111{ 118{
112 musb_writel(mbase, 119 musb_writew(mbase,
120 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),len);
121 musb_writew(mbase,
113 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH), 122 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
114 len); 123 (len >> 16));
115} 124}
116 125
117#endif /* CONFIG_BLACKFIN */ 126#endif /* CONFIG_BLACKFIN */
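On the Blackfin register layout the 32-bit DMA address and count can no longer be accessed with a single 32-bit read/write, so the hunk above splits them into low/high 16-bit halves and recombines them on read. The arithmetic can be verified in isolation; the two-element array below merely stands in for the LOW/HIGH register pair:

#include <stdio.h>
#include <stdint.h>

static uint16_t regs[2];	/* regs[0] = COUNT_LOW, regs[1] = COUNT_HIGH */

static void write_count(uint32_t len)
{
	regs[0] = (uint16_t)len;		/* low 16 bits */
	regs[1] = (uint16_t)(len >> 16);	/* high 16 bits */
}

static uint32_t read_count(void)
{
	uint32_t count = regs[1];

	count <<= 16;
	count |= regs[0];
	return count;
}

int main(void)
{
	write_count(0x0001fffeu);
	printf("0x%08x\n", (unsigned)read_count());	/* prints 0x0001fffe */
	return 0;
}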
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 9fb875d5f09c..9ffc8237fb4b 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -103,6 +103,8 @@ config USB_MSM_OTG_72K
103 required after resetting the hardware and power management. 103 required after resetting the hardware and power management.
104 This driver is required even for peripheral only or host only 104 This driver is required even for peripheral only or host only
105 mode configurations. 105 mode configurations.
106 This driver is not supported on boards like trout which
 107	  have an external PHY.
106 108
107config AB8500_USB 109config AB8500_USB
108 tristate "AB8500 USB Transceiver Driver" 110 tristate "AB8500 USB Transceiver Driver"
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 4787c0cd063f..f349a3629d00 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -100,6 +100,7 @@ struct ftdi_sio_quirk {
100static int ftdi_jtag_probe(struct usb_serial *serial); 100static int ftdi_jtag_probe(struct usb_serial *serial);
101static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); 101static int ftdi_mtxorb_hack_setup(struct usb_serial *serial);
102static int ftdi_NDI_device_setup(struct usb_serial *serial); 102static int ftdi_NDI_device_setup(struct usb_serial *serial);
103static int ftdi_stmclite_probe(struct usb_serial *serial);
103static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); 104static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
104static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); 105static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
105 106
@@ -123,6 +124,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
123 .port_probe = ftdi_HE_TIRA1_setup, 124 .port_probe = ftdi_HE_TIRA1_setup,
124}; 125};
125 126
127static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
128 .probe = ftdi_stmclite_probe,
129};
130
126/* 131/*
127 * The 8U232AM has the same API as the sio except for: 132 * The 8U232AM has the same API as the sio except for:
128 * - it can support MUCH higher baudrates; up to: 133 * - it can support MUCH higher baudrates; up to:
@@ -616,6 +621,7 @@ static struct usb_device_id id_table_combined [] = {
616 { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) }, 621 { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
617 { USB_DEVICE(TTI_VID, TTI_QL355P_PID) }, 622 { USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
618 { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, 623 { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
624 { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
619 { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, 625 { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
620 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, 626 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
621 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, 627 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
@@ -810,6 +816,8 @@ static struct usb_device_id id_table_combined [] = {
810 { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, 816 { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
811 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), 817 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
812 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 818 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
819 { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
820 .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
813 { }, /* Optional parameter entry */ 821 { }, /* Optional parameter entry */
814 { } /* Terminating entry */ 822 { } /* Terminating entry */
815}; 823};
@@ -1709,6 +1717,25 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
1709} 1717}
1710 1718
1711/* 1719/*
 1720 * First and second ports on STMCLite adaptors are reserved for the JTAG
 1721 * interface and the fourth port for PIO
1722 */
1723static int ftdi_stmclite_probe(struct usb_serial *serial)
1724{
1725 struct usb_device *udev = serial->dev;
1726 struct usb_interface *interface = serial->interface;
1727
1728 dbg("%s", __func__);
1729
1730 if (interface == udev->actconfig->interface[2])
1731 return 0;
1732
1733 dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
1734
1735 return -ENODEV;
1736}
1737
1738/*
1712 * The Matrix Orbital VK204-25-USB has an invalid IN endpoint. 1739 * The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
1713 * We have to correct it if we want to read from it. 1740 * We have to correct it if we want to read from it.
1714 */ 1741 */
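ftdi_stmclite_probe() above binds the serial driver only to the third interface of the adapter and returns -ENODEV for the interfaces reserved for JTAG and PIO, so usb-serial skips them. A stand-alone sketch of that accept/reject decision (the interface numbering is the assumption stated in the comment above, and stmclite_like_probe is an illustrative name):

#include <stdio.h>
#include <errno.h>

/* Sketch: bind only the interface that actually carries the UART; report
 * -ENODEV for interfaces reserved for other functions so they are skipped.
 */
static int stmclite_like_probe(int interface_num)
{
	if (interface_num == 2)		/* third interface: the serial port */
		return 0;

	fprintf(stderr, "Ignoring interface %d reserved for JTAG/PIO\n",
		interface_num);
	return -ENODEV;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("interface %d -> %d\n", i, stmclite_like_probe(i));
	return 0;
}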
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index ed160def8584..117e8e6f93c6 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -518,6 +518,12 @@
518#define RATOC_PRODUCT_ID_USB60F 0xb020 518#define RATOC_PRODUCT_ID_USB60F 0xb020
519 519
520/* 520/*
521 * Acton Research Corp.
522 */
523#define ACTON_VID 0x0647 /* Vendor ID */
524#define ACTON_SPECTRAPRO_PID 0x0100
525
526/*
521 * Contec products (http://www.contec.com) 527 * Contec products (http://www.contec.com)
522 * Submitted by Daniel Sangorrin 528 * Submitted by Daniel Sangorrin
523 */ 529 */
@@ -1034,6 +1040,12 @@
1034#define WHT_PID 0x0004 /* Wireless Handheld Terminal */ 1040#define WHT_PID 0x0004 /* Wireless Handheld Terminal */
1035 1041
1036/* 1042/*
 1043 * STMicroelectronics
1044 */
1045#define ST_VID 0x0483
1046#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */
1047
1048/*
1037 * Papouch products (http://www.papouch.com/) 1049 * Papouch products (http://www.papouch.com/)
1038 * Submitted by Folkert van Heusden 1050 * Submitted by Folkert van Heusden
1039 */ 1051 */
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index cd769ef24f8a..3b246d93cf22 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2889,8 +2889,8 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
2889 2889
2890 dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build); 2890 dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build);
2891 2891
2892 edge_serial->product_info.FirmwareMajorVersion = fw->data[0]; 2892 edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
2893 edge_serial->product_info.FirmwareMinorVersion = fw->data[1]; 2893 edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
2894 edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build); 2894 edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);
2895 2895
2896 for (rec = ihex_next_binrec(rec); rec; 2896 for (rec = ihex_next_binrec(rec); rec;
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index b2902f307b47..a910004f4079 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -369,9 +369,9 @@ failed_1port:
369 369
370static void __exit ti_exit(void) 370static void __exit ti_exit(void)
371{ 371{
372 usb_deregister(&ti_usb_driver);
372 usb_serial_deregister(&ti_1port_device); 373 usb_serial_deregister(&ti_1port_device);
373 usb_serial_deregister(&ti_2port_device); 374 usb_serial_deregister(&ti_2port_device);
374 usb_deregister(&ti_usb_driver);
375} 375}
376 376
377 377
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 24bd5d7c3deb..c1602b8c5594 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1397,6 +1397,13 @@ UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100,
1397 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1397 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1398 US_FL_IGNORE_RESIDUE ), 1398 US_FL_IGNORE_RESIDUE ),
1399 1399
1400/* Submitted by Nick Holloway */
1401UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
1402 "VTech",
1403 "Kidizoom",
1404 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1405 US_FL_FIX_CAPACITY ),
1406
1400/* Reported by Michael Stattmann <michael@stattmann.com> */ 1407/* Reported by Michael Stattmann <michael@stattmann.com> */
1401UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, 1408UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
1402 "Sony Ericsson", 1409 "Sony Ericsson",
@@ -1890,6 +1897,13 @@ UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000,
1890 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1897 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1891 US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), 1898 US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
1892 1899
1900/* Reported by Jasper Mackenzie <scarletpimpernal@hotmail.com> */
1901UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
1902 "Coby Electronics",
1903 "MP3 Player",
1904 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1905 US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
1906
1893UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001, 1907UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
1894 "ST", 1908 "ST",
1895 "2A", 1909 "2A",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9b3ca103135f..f616cefc95ba 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -128,8 +128,7 @@ static void handle_tx(struct vhost_net *net)
128 size_t hdr_size; 128 size_t hdr_size;
129 struct socket *sock; 129 struct socket *sock;
130 130
131 /* TODO: check that we are running from vhost_worker? 131 /* TODO: check that we are running from vhost_worker? */
132 * Not sure it's worth it, it's straight-forward enough. */
133 sock = rcu_dereference_check(vq->private_data, 1); 132 sock = rcu_dereference_check(vq->private_data, 1);
134 if (!sock) 133 if (!sock)
135 return; 134 return;
@@ -306,7 +305,8 @@ static void handle_rx_big(struct vhost_net *net)
306 size_t len, total_len = 0; 305 size_t len, total_len = 0;
307 int err; 306 int err;
308 size_t hdr_size; 307 size_t hdr_size;
309 struct socket *sock = rcu_dereference(vq->private_data); 308 /* TODO: check that we are running from vhost_worker? */
309 struct socket *sock = rcu_dereference_check(vq->private_data, 1);
310 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) 310 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
311 return; 311 return;
312 312
@@ -415,7 +415,8 @@ static void handle_rx_mergeable(struct vhost_net *net)
415 int err, headcount; 415 int err, headcount;
416 size_t vhost_hlen, sock_hlen; 416 size_t vhost_hlen, sock_hlen;
417 size_t vhost_len, sock_len; 417 size_t vhost_len, sock_len;
418 struct socket *sock = rcu_dereference(vq->private_data); 418 /* TODO: check that we are running from vhost_worker? */
419 struct socket *sock = rcu_dereference_check(vq->private_data, 1);
419 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) 420 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
420 return; 421 return;
421 422
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 2af44b7b1f3f..b3363ae38518 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -173,9 +173,9 @@ static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
173{ 173{
174 unsigned acked_features; 174 unsigned acked_features;
175 175
176 acked_features = 176 /* TODO: check that we are running from vhost_worker or dev mutex is
177 rcu_dereference_index_check(dev->acked_features, 177 * held? */
178 lockdep_is_held(&dev->mutex)); 178 acked_features = rcu_dereference_index_check(dev->acked_features, 1);
179 return acked_features & (1 << bit); 179 return acked_features & (1 << bit);
180} 180}
181 181
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3a7e9ff8a746..38e96ab90945 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -593,19 +593,17 @@ static int __devinit omap_hdq_probe(struct platform_device *pdev)
593 593
594 /* get interface & functional clock objects */ 594 /* get interface & functional clock objects */
595 hdq_data->hdq_ick = clk_get(&pdev->dev, "ick"); 595 hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
596 hdq_data->hdq_fck = clk_get(&pdev->dev, "fck"); 596 if (IS_ERR(hdq_data->hdq_ick)) {
597 dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n");
598 ret = PTR_ERR(hdq_data->hdq_ick);
599 goto err_ick;
600 }
597 601
598 if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) { 602 hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
599 dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n"); 603 if (IS_ERR(hdq_data->hdq_fck)) {
600 if (IS_ERR(hdq_data->hdq_ick)) { 604 dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n");
601 ret = PTR_ERR(hdq_data->hdq_ick); 605 ret = PTR_ERR(hdq_data->hdq_fck);
602 goto err_clk; 606 goto err_fck;
603 }
604 if (IS_ERR(hdq_data->hdq_fck)) {
605 ret = PTR_ERR(hdq_data->hdq_fck);
606 clk_put(hdq_data->hdq_ick);
607 goto err_clk;
608 }
609 } 607 }
610 608
611 hdq_data->hdq_usecount = 0; 609 hdq_data->hdq_usecount = 0;
@@ -665,10 +663,12 @@ err_fnclk:
665 clk_disable(hdq_data->hdq_ick); 663 clk_disable(hdq_data->hdq_ick);
666 664
667err_intfclk: 665err_intfclk:
668 clk_put(hdq_data->hdq_ick);
669 clk_put(hdq_data->hdq_fck); 666 clk_put(hdq_data->hdq_fck);
670 667
671err_clk: 668err_fck:
669 clk_put(hdq_data->hdq_ick);
670
671err_ick:
672 iounmap(hdq_data->hdq_base); 672 iounmap(hdq_data->hdq_base);
673 673
674err_ioremap: 674err_ioremap:
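The omap_hdq rework above takes the two clocks one at a time and gives each acquisition its own error label, so every label unwinds exactly the resources obtained before the failure point. A user-space sketch of the same goto ladder, with malloc standing in for clk_get and probe_like as a made-up name:

#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

/* Sketch of the cleanup ladder: acquire resources in order and, on failure,
 * jump to the label that releases only what was already acquired.
 */
static int probe_like(void)
{
	void *ick, *fck;
	int ret;

	ick = malloc(16);		/* stands in for clk_get(dev, "ick") */
	if (!ick) {
		ret = -ENOMEM;
		goto err_ick;
	}

	fck = malloc(16);		/* stands in for clk_get(dev, "fck") */
	if (!fck) {
		ret = -ENOMEM;
		goto err_fck;
	}

	free(fck);			/* normal teardown for the demo */
	free(ick);
	return 0;

err_fck:
	free(ick);			/* undo only what was acquired */
err_ick:
	return ret;
}

int main(void)
{
	printf("%d\n", probe_like());
	return 0;
}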
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 2e2400e7322e..31649b7b672f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -862,12 +862,12 @@ config SBC_EPX_C3_WATCHDOG
862 862
863# M68K Architecture 863# M68K Architecture
864 864
865config M548x_WATCHDOG 865config M54xx_WATCHDOG
866 tristate "MCF548x watchdog support" 866 tristate "MCF54xx watchdog support"
867 depends on M548x 867 depends on M548x
868 help 868 help
869 To compile this driver as a module, choose M here: the 869 To compile this driver as a module, choose M here: the
870 module will be called m548x_wdt. 870 module will be called m54xx_wdt.
871 871
872# MIPS Architecture 872# MIPS Architecture
873 873
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index dd776651917c..20e44c4782b3 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -106,7 +106,7 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
106# M32R Architecture 106# M32R Architecture
107 107
108# M68K Architecture 108# M68K Architecture
109obj-$(CONFIG_M548x_WATCHDOG) += m548x_wdt.o 109obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
110 110
111# MIPS Architecture 111# MIPS Architecture
112obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o 112obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
diff --git a/drivers/watchdog/m548x_wdt.c b/drivers/watchdog/m54xx_wdt.c
index cabbcfe1c847..4d43286074aa 100644
--- a/drivers/watchdog/m548x_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/watchdog/m548x_wdt.c 2 * drivers/watchdog/m54xx_wdt.c
3 * 3 *
4 * Watchdog driver for ColdFire MCF548x processors 4 * Watchdog driver for ColdFire MCF547x & MCF548x processors
5 * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be> 5 * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be>
6 * 6 *
7 * Adapted from the IXP4xx watchdog driver, which carries these notices: 7 * Adapted from the IXP4xx watchdog driver, which carries these notices:
@@ -29,8 +29,8 @@
29#include <linux/uaccess.h> 29#include <linux/uaccess.h>
30 30
31#include <asm/coldfire.h> 31#include <asm/coldfire.h>
32#include <asm/m548xsim.h> 32#include <asm/m54xxsim.h>
33#include <asm/m548xgpt.h> 33#include <asm/m54xxgpt.h>
34 34
35static int nowayout = WATCHDOG_NOWAYOUT; 35static int nowayout = WATCHDOG_NOWAYOUT;
36static unsigned int heartbeat = 30; /* (secs) Default is 0.5 minute */ 36static unsigned int heartbeat = 30; /* (secs) Default is 0.5 minute */
@@ -76,7 +76,7 @@ static void wdt_keepalive(void)
76 __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0); 76 __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
77} 77}
78 78
79static int m548x_wdt_open(struct inode *inode, struct file *file) 79static int m54xx_wdt_open(struct inode *inode, struct file *file)
80{ 80{
81 if (test_and_set_bit(WDT_IN_USE, &wdt_status)) 81 if (test_and_set_bit(WDT_IN_USE, &wdt_status))
82 return -EBUSY; 82 return -EBUSY;
@@ -86,7 +86,7 @@ static int m548x_wdt_open(struct inode *inode, struct file *file)
86 return nonseekable_open(inode, file); 86 return nonseekable_open(inode, file);
87} 87}
88 88
89static ssize_t m548x_wdt_write(struct file *file, const char *data, 89static ssize_t m54xx_wdt_write(struct file *file, const char *data,
90 size_t len, loff_t *ppos) 90 size_t len, loff_t *ppos)
91{ 91{
92 if (len) { 92 if (len) {
@@ -112,10 +112,10 @@ static ssize_t m548x_wdt_write(struct file *file, const char *data,
112static const struct watchdog_info ident = { 112static const struct watchdog_info ident = {
113 .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | 113 .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
114 WDIOF_KEEPALIVEPING, 114 WDIOF_KEEPALIVEPING,
115 .identity = "Coldfire M548x Watchdog", 115 .identity = "Coldfire M54xx Watchdog",
116}; 116};
117 117
118static long m548x_wdt_ioctl(struct file *file, unsigned int cmd, 118static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd,
119 unsigned long arg) 119 unsigned long arg)
120{ 120{
121 int ret = -ENOTTY; 121 int ret = -ENOTTY;
@@ -161,7 +161,7 @@ static long m548x_wdt_ioctl(struct file *file, unsigned int cmd,
161 return ret; 161 return ret;
162} 162}
163 163
164static int m548x_wdt_release(struct inode *inode, struct file *file) 164static int m54xx_wdt_release(struct inode *inode, struct file *file)
165{ 165{
166 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) 166 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
167 wdt_disable(); 167 wdt_disable();
@@ -177,45 +177,45 @@ static int m548x_wdt_release(struct inode *inode, struct file *file)
177} 177}
178 178
179 179
180static const struct file_operations m548x_wdt_fops = { 180static const struct file_operations m54xx_wdt_fops = {
181 .owner = THIS_MODULE, 181 .owner = THIS_MODULE,
182 .llseek = no_llseek, 182 .llseek = no_llseek,
183 .write = m548x_wdt_write, 183 .write = m54xx_wdt_write,
184 .unlocked_ioctl = m548x_wdt_ioctl, 184 .unlocked_ioctl = m54xx_wdt_ioctl,
185 .open = m548x_wdt_open, 185 .open = m54xx_wdt_open,
186 .release = m548x_wdt_release, 186 .release = m54xx_wdt_release,
187}; 187};
188 188
189static struct miscdevice m548x_wdt_miscdev = { 189static struct miscdevice m54xx_wdt_miscdev = {
190 .minor = WATCHDOG_MINOR, 190 .minor = WATCHDOG_MINOR,
191 .name = "watchdog", 191 .name = "watchdog",
192 .fops = &m548x_wdt_fops, 192 .fops = &m54xx_wdt_fops,
193}; 193};
194 194
195static int __init m548x_wdt_init(void) 195static int __init m54xx_wdt_init(void)
196{ 196{
197 if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4, 197 if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4,
198 "Coldfire M548x Watchdog")) { 198 "Coldfire M54xx Watchdog")) {
199 printk(KERN_WARNING 199 printk(KERN_WARNING
200 "Coldfire M548x Watchdog : I/O region busy\n"); 200 "Coldfire M54xx Watchdog : I/O region busy\n");
201 return -EBUSY; 201 return -EBUSY;
202 } 202 }
203 printk(KERN_INFO "ColdFire watchdog driver is loaded.\n"); 203 printk(KERN_INFO "ColdFire watchdog driver is loaded.\n");
204 204
205 return misc_register(&m548x_wdt_miscdev); 205 return misc_register(&m54xx_wdt_miscdev);
206} 206}
207 207
208static void __exit m548x_wdt_exit(void) 208static void __exit m54xx_wdt_exit(void)
209{ 209{
210 misc_deregister(&m548x_wdt_miscdev); 210 misc_deregister(&m54xx_wdt_miscdev);
211 release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4); 211 release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4);
212} 212}
213 213
214module_init(m548x_wdt_init); 214module_init(m54xx_wdt_init);
215module_exit(m548x_wdt_exit); 215module_exit(m54xx_wdt_exit);
216 216
217MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>"); 217MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>");
218MODULE_DESCRIPTION("Coldfire M548x Watchdog"); 218MODULE_DESCRIPTION("Coldfire M54xx Watchdog");
219 219
220module_param(heartbeat, int, 0); 220module_param(heartbeat, int, 0);
221MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)"); 221MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)");
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index db8c4c4ac880..24177272bcb8 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -37,11 +37,19 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
37#ifdef CONFIG_PM_SLEEP 37#ifdef CONFIG_PM_SLEEP
38static int xen_hvm_suspend(void *data) 38static int xen_hvm_suspend(void *data)
39{ 39{
40 int err;
40 struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; 41 struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
41 int *cancelled = data; 42 int *cancelled = data;
42 43
43 BUG_ON(!irqs_disabled()); 44 BUG_ON(!irqs_disabled());
44 45
46 err = sysdev_suspend(PMSG_SUSPEND);
47 if (err) {
48 printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n",
49 err);
50 return err;
51 }
52
45 *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); 53 *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
46 54
47 xen_hvm_post_suspend(*cancelled); 55 xen_hvm_post_suspend(*cancelled);
@@ -53,6 +61,8 @@ static int xen_hvm_suspend(void *data)
53 xen_timer_resume(); 61 xen_timer_resume();
54 } 62 }
55 63
64 sysdev_resume();
65
56 return 0; 66 return 0;
57} 67}
58 68