Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpi_pad.c34
-rw-r--r--drivers/acpi/acpica/aclocal.h1
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/apei/Kconfig2
-rw-r--r--drivers/acpi/apei/apei-base.c21
-rw-r--r--drivers/acpi/apei/einj.c4
-rw-r--r--drivers/acpi/apei/erst-dbg.c18
-rw-r--r--drivers/acpi/apei/erst.c29
-rw-r--r--drivers/acpi/apei/ghes.c2
-rw-r--r--drivers/acpi/apei/hest.c11
-rw-r--r--drivers/acpi/atomicio.c2
-rw-r--r--drivers/acpi/battery.c1
-rw-r--r--drivers/acpi/blacklist.c35
-rw-r--r--drivers/acpi/bus.c18
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/pci_root.c97
-rw-r--r--drivers/acpi/processor_core.c7
-rw-r--r--drivers/acpi/processor_driver.c2
-rw-r--r--drivers/acpi/processor_perflib.c4
-rw-r--r--drivers/acpi/sleep.c22
-rw-r--r--drivers/acpi/sysfs.c20
-rw-r--r--drivers/acpi/video_detect.c4
-rw-r--r--drivers/ata/Kconfig1
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c18
-rw-r--r--drivers/ata/ahci.h13
-rw-r--r--drivers/ata/ahci_platform.c6
-rw-r--r--drivers/ata/ata_piix.c4
-rw-r--r--drivers/ata/libahci.c34
-rw-r--r--drivers/ata/libata-core.c25
-rw-r--r--drivers/ata/libata-eh.c4
-rw-r--r--drivers/ata/libata-sff.c45
-rw-r--r--drivers/ata/pata_artop.c3
-rw-r--r--drivers/ata/pata_cmd64x.c6
-rw-r--r--drivers/ata/pata_legacy.c15
-rw-r--r--drivers/ata/pata_via.c2
-rw-r--r--drivers/ata/pata_winbond.c282
-rw-r--r--drivers/ata/sata_dwc_460ex.c2
-rw-r--r--drivers/ata/sata_mv.c46
-rw-r--r--drivers/atm/iphase.c6
-rw-r--r--drivers/atm/iphase.h2
-rw-r--r--drivers/atm/solos-pci.c8
-rw-r--r--drivers/base/firmware_class.c2
-rw-r--r--drivers/base/power/main.c1
-rw-r--r--drivers/block/cciss.c13
-rw-r--r--drivers/block/mg_disk.c3
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/virtio_blk.c6
-rw-r--r--drivers/block/xen-blkfront.c2
-rw-r--r--drivers/char/agp/intel-agp.c18
-rw-r--r--drivers/char/agp/intel-agp.h24
-rw-r--r--drivers/char/agp/intel-gtt.c66
-rw-r--r--drivers/char/hangcheck-timer.c2
-rw-r--r--drivers/char/hvc_console.c2
-rw-r--r--drivers/char/hvsi.c2
-rw-r--r--drivers/char/hw_random/n2-drv.c2
-rw-r--r--drivers/char/ip2/ip2main.c4
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c17
-rw-r--r--drivers/char/mem.c3
-rw-r--r--drivers/char/rocket.c1
-rw-r--r--drivers/char/synclink_gt.c4
-rw-r--r--drivers/char/sysrq.c53
-rw-r--r--drivers/char/tty_io.c2
-rw-r--r--drivers/char/virtio_console.c6
-rw-r--r--drivers/char/vt.c15
-rw-r--r--drivers/char/vt_ioctl.c16
-rw-r--r--drivers/cpuidle/governors/menu.c2
-rw-r--r--drivers/dca/dca-core.c85
-rw-r--r--drivers/dma/ioat/dma_v2.c2
-rw-r--r--drivers/dma/mv_xor.c2
-rw-r--r--drivers/dma/shdma.c3
-rw-r--r--drivers/edac/amd64_edac.c10
-rw-r--r--drivers/edac/edac_mc.c3
-rw-r--r--drivers/edac/edac_mce_amd.c17
-rw-r--r--drivers/edac/i7core_edac.c1
-rw-r--r--drivers/firewire/core-transaction.c13
-rw-r--r--drivers/firewire/net.c28
-rw-r--r--drivers/firewire/ohci.c11
-rw-r--r--drivers/firewire/sbp2.c23
-rw-r--r--drivers/gpio/sx150x.c26
-rw-r--r--drivers/gpu/drm/drm_buffer.c6
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c32
-rw-r--r--drivers/gpu/drm/drm_drv.c25
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c5
-rw-r--r--drivers/gpu/drm/drm_fops.c1
-rw-r--r--drivers/gpu/drm/drm_gem.c39
-rw-r--r--drivers/gpu/drm/drm_info.c2
-rw-r--r--drivers/gpu/drm/drm_lock.c2
-rw-r--r--drivers/gpu/drm/drm_mm.c24
-rw-r--r--drivers/gpu/drm/drm_modes.c5
-rw-r--r--drivers/gpu/drm/drm_pci.c4
-rw-r--r--drivers/gpu/drm/drm_platform.c5
-rw-r--r--drivers/gpu/drm/drm_sysfs.c2
-rw-r--r--drivers/gpu/drm/drm_vm.c30
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c32
-rw-r--r--drivers/gpu/drm/i830/i830_dma.c30
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c50
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c125
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c66
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h27
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c90
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c44
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c73
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h9
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c36
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c10
-rw-r--r--drivers/gpu/drm/i915/intel_display.c339
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c79
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h11
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c3
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c4
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c9
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c8
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c89
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c20
-rw-r--r--drivers/gpu/drm/mga/mga_state.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c100
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c53
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c31
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c13
-rw-r--r--drivers/gpu/drm/r128/r128_state.c35
-rw-r--r--drivers/gpu/drm/radeon/atombios.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c100
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c80
-rw-r--r--drivers/gpu/drm/radeon/r100.c27
-rw-r--r--drivers/gpu/drm/radeon/r600.c25
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c27
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.h24
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c93
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c58
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c177
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c120
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c223
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c83
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c56
-rw-r--r--drivers/gpu/drm/radeon/rs600.c1
-rw-r--r--drivers/gpu/drm/radeon/rs690.c1
-rw-r--r--drivers/gpu/drm/radeon/rv770.c63
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c8
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c83
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c8
-rw-r--r--drivers/gpu/drm/via/via_dma.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c179
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c34
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/vga/vgaarb.c2
-rw-r--r--drivers/hid/hid-core.c4
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-mosart.c1
-rw-r--r--drivers/hid/hid-topseed.c1
-rw-r--r--drivers/hid/usbhid/hid-core.c8
-rw-r--r--drivers/hid/usbhid/hid-quirks.c4
-rw-r--r--drivers/hid/usbhid/hiddev.c2
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/adm1031.c43
-rw-r--r--drivers/hwmon/ads7871.c38
-rw-r--r--drivers/hwmon/coretemp.c58
-rw-r--r--drivers/hwmon/emc1403.c1
-rw-r--r--drivers/hwmon/f71882fg.c32
-rw-r--r--drivers/hwmon/f75375s.c6
-rw-r--r--drivers/hwmon/hp_accel.c2
-rw-r--r--drivers/hwmon/k8temp.c35
-rw-r--r--drivers/hwmon/lis3lv02d.c4
-rw-r--r--drivers/hwmon/lis3lv02d_i2c.c4
-rw-r--r--drivers/hwmon/lis3lv02d_spi.c4
-rw-r--r--drivers/hwmon/lm95241.c21
-rw-r--r--drivers/hwmon/pkgtemp.c23
-rw-r--r--drivers/hwmon/w83627ehf.c1
-rw-r--r--drivers/i2c/busses/i2c-cpm.c5
-rw-r--r--drivers/i2c/busses/i2c-davinci.c6
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c3
-rw-r--r--drivers/i2c/busses/i2c-mpc.c1
-rw-r--r--drivers/i2c/busses/i2c-octeon.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c12
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c11
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c4
-rw-r--r--drivers/i2c/i2c-core.c54
-rw-r--r--drivers/ide/ide-probe.c12
-rw-r--r--[-rwxr-xr-x]drivers/idle/intel_idle.c30
-rw-r--r--drivers/ieee1394/ohci1394.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c18
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c14
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h1
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c4
-rw-r--r--drivers/input/input.c11
-rw-r--r--drivers/input/joydev.c3
-rw-r--r--drivers/input/keyboard/hil_kbd.c12
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c2
-rw-r--r--drivers/input/misc/uinput.c9
-rw-r--r--drivers/input/mouse/bcm5974.c12
-rw-r--r--drivers/input/mousedev.c8
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/tablet/wacom_sys.c23
-rw-r--r--drivers/input/tablet/wacom_wac.c8
-rw-r--r--drivers/isdn/hardware/avm/Kconfig3
-rw-r--r--drivers/isdn/sc/interrupt.c18
-rw-r--r--drivers/leds/leds-ns2.c9
-rw-r--r--drivers/macintosh/via-pmu.c42
-rw-r--r--drivers/md/.gitignore4
-rw-r--r--drivers/md/bitmap.c12
-rw-r--r--drivers/md/md.c31
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/media/IR/ir-keytable.c9
-rw-r--r--drivers/media/IR/ir-lirc-codec.c2
-rw-r--r--drivers/media/IR/ir-raw-event.c4
-rw-r--r--drivers/media/IR/ir-sysfs.c17
-rw-r--r--drivers/media/IR/keymaps/rc-rc6-mce.c3
-rw-r--r--drivers/media/IR/mceusb.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c3
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c56
-rw-r--r--drivers/media/dvb/dvb-usb/opera1.c4
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c8
-rw-r--r--drivers/media/dvb/frontends/dib7000p.h5
-rw-r--r--drivers/media/dvb/mantis/Kconfig2
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c31
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/video/cx231xx/Makefile1
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c17
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/video/cx88/Kconfig2
-rw-r--r--drivers/media/video/gspca/gspca.c1
-rw-r--r--drivers/media/video/gspca/sn9c20x.c3
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/video/mem2mem_testdev.c3
-rw-r--r--drivers/media/video/mt9m111.c8
-rw-r--r--drivers/media/video/mt9v022.c3
-rw-r--r--drivers/media/video/mx2_camera.c4
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ctrl.c6
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c94
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c10
-rw-r--r--drivers/media/video/saa7164/saa7164-buffer.c5
-rw-r--r--drivers/media/video/uvc/uvc_driver.c24
-rw-r--r--drivers/media/video/uvc/uvcvideo.h1
-rw-r--r--drivers/media/video/videobuf-dma-contig.c6
-rw-r--r--drivers/media/video/videobuf-dma-sg.c11
-rw-r--r--drivers/mfd/max8925-core.c13
-rw-r--r--drivers/mfd/wm831x-irq.c9
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/bh1780gli.c1
-rw-r--r--drivers/misc/vmw_balloon.c (renamed from drivers/misc/vmware_balloon.c)0
-rw-r--r--drivers/mmc/core/sdio.c5
-rw-r--r--drivers/mmc/host/at91_mci.c1
-rw-r--r--drivers/mmc/host/imxmmc.c3
-rw-r--r--drivers/mmc/host/omap_hsmmc.c3
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c12
-rw-r--r--drivers/mmc/host/tmio_mmc.c7
-rw-r--r--drivers/mmc/host/tmio_mmc.h13
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c9
-rw-r--r--drivers/mtd/nand/mxc_nand.c47
-rw-r--r--drivers/mtd/nand/omap2.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/mtd/onenand/samsung.c16
-rw-r--r--drivers/mtd/ubi/Kconfig.debug2
-rw-r--r--drivers/mtd/ubi/cdev.c12
-rw-r--r--drivers/mtd/ubi/scan.c2
-rw-r--r--drivers/mtd/ubi/wl.c3
-rw-r--r--drivers/net/3c59x.c47
-rw-r--r--drivers/net/Kconfig14
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/atlx/atl1.c11
-rw-r--r--drivers/net/b44.c13
-rw-r--r--drivers/net/benet/be.h1
-rw-r--r--drivers/net/benet/be_cmds.c8
-rw-r--r--drivers/net/benet/be_cmds.h2
-rw-r--r--drivers/net/benet/be_ethtool.c1
-rw-r--r--drivers/net/benet/be_hw.h7
-rw-r--r--drivers/net/benet/be_main.c47
-rw-r--r--drivers/net/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c9
-rw-r--r--drivers/net/bonding/bond_3ad.c3
-rw-r--r--drivers/net/bonding/bond_alb.c3
-rw-r--r--drivers/net/bonding/bond_main.c65
-rw-r--r--drivers/net/caif/Kconfig2
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/e1000e/82571.c31
-rw-r--r--drivers/net/e1000e/defines.h4
-rw-r--r--drivers/net/e1000e/hw.h1
-rw-r--r--drivers/net/e1000e/ich8lan.c197
-rw-r--r--drivers/net/e1000e/lib.c10
-rw-r--r--drivers/net/e1000e/netdev.c29
-rw-r--r--drivers/net/ehea/ehea.h3
-rw-r--r--drivers/net/ehea/ehea_main.c69
-rw-r--r--drivers/net/ehea/ehea_qmr.h1
-rw-r--r--drivers/net/eql.c2
-rw-r--r--drivers/net/fec.c44
-rw-r--r--drivers/net/ibm_newemac/core.c4
-rw-r--r--drivers/net/ibm_newemac/debug.c2
-rw-r--r--drivers/net/ibmveth.c32
-rw-r--r--drivers/net/ks8851.c39
-rw-r--r--drivers/net/ll_temac_main.c5
-rw-r--r--drivers/net/ll_temac_mdio.c1
-rw-r--r--drivers/net/netxen/netxen_nic.h4
-rw-r--r--drivers/net/netxen/netxen_nic_init.c7
-rw-r--r--drivers/net/netxen/netxen_nic_main.c11
-rw-r--r--drivers/net/niu.c16
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c140
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/ppp_generic.c9
-rw-r--r--drivers/net/pxa168_eth.c1666
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c7
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c11
-rw-r--r--drivers/net/qlge/qlge_main.c4
-rw-r--r--drivers/net/r8169.c70
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/sh_eth.c2
-rw-r--r--drivers/net/skge.c18
-rw-r--r--drivers/net/smsc911x.c1
-rw-r--r--drivers/net/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/tg3.c6
-rw-r--r--drivers/net/tg3.h2
-rw-r--r--drivers/net/tulip/de2104x.c43
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/ipheth.c12
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/net/wimax/i2400m/rx.c26
-rw-r--r--drivers/net/wireless/adm8211.c8
-rw-r--r--drivers/net/wireless/at76c50x-usb.c22
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h2
-rw-r--r--drivers/net/wireless/ath/regd.h1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c50
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c53
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/mwl8k.c34
-rw-r--r--drivers/net/wireless/p54/eeprom.c6
-rw-r--r--drivers/net/wireless/p54/fwio.c2
-rw-r--r--drivers/net/wireless/p54/led.c4
-rw-r--r--drivers/net/wireless/p54/p54pci.c2
-rw-r--r--drivers/net/wireless/p54/txrx.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_rtl8225.c4
-rw-r--r--drivers/oprofile/buffer_sync.c27
-rw-r--r--drivers/oprofile/cpu_buffer.c2
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c6
-rw-r--r--drivers/pci/hotplug/pciehp.h16
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c4
-rw-r--r--drivers/pci/hotplug/pciehp_core.c4
-rw-r--r--drivers/pci/intel-iommu.c117
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/pci.h7
-rw-r--r--drivers/pci/pcie/Makefile3
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c9
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c36
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c14
-rw-r--r--drivers/pci/pcie/pme.c (renamed from drivers/pci/pcie/pme/pcie_pme.c)66
-rw-r--r--drivers/pci/pcie/pme/Makefile8
-rw-r--r--drivers/pci/pcie/pme/pcie_pme.h28
-rw-r--r--drivers/pci/pcie/pme/pcie_pme_acpi.c54
-rw-r--r--drivers/pci/pcie/portdrv.h22
-rw-r--r--drivers/pci/pcie/portdrv_acpi.c77
-rw-r--r--drivers/pci/pcie/portdrv_core.c53
-rw-r--r--drivers/pci/pcie/portdrv_pci.c38
-rw-r--r--drivers/pci/quirks.c20
-rw-r--r--drivers/pci/slot.c2
-rw-r--r--drivers/pcmcia/pcmcia_resource.c57
-rw-r--r--drivers/pcmcia/pd6729.c2
-rw-r--r--drivers/platform/x86/Kconfig4
-rw-r--r--drivers/platform/x86/hp-wmi.c64
-rw-r--r--drivers/platform/x86/intel_ips.c122
-rw-r--r--drivers/platform/x86/intel_rar_register.c2
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c5
-rw-r--r--drivers/power/apm_power.c1
-rw-r--r--drivers/power/intel_mid_battery.c6
-rw-r--r--drivers/regulator/88pm8607.c4
-rw-r--r--drivers/regulator/ab3100.c5
-rw-r--r--drivers/regulator/ab8500.c9
-rw-r--r--drivers/regulator/ad5398.c13
-rw-r--r--drivers/regulator/core.c6
-rw-r--r--drivers/regulator/isl6271a-regulator.c4
-rw-r--r--drivers/regulator/max1586.c12
-rw-r--r--drivers/regulator/max8649.c2
-rw-r--r--drivers/regulator/max8998.c8
-rw-r--r--drivers/regulator/tps6507x-regulator.c6
-rw-r--r--drivers/regulator/tps6586x-regulator.c4
-rw-r--r--drivers/regulator/wm831x-ldo.c7
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/rtc/rtc-ab3100.c2
-rw-r--r--drivers/rtc/rtc-bfin.c15
-rw-r--r--drivers/rtc/rtc-ds3232.c2
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-pl031.c2
-rw-r--r--drivers/rtc/rtc-s3c.c13
-rw-r--r--drivers/s390/char/ctrlchar.c4
-rw-r--r--drivers/s390/char/keyboard.c2
-rw-r--r--drivers/s390/char/tape_block.c3
-rw-r--r--drivers/s390/net/ctcm_main.c4
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c5
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c2
-rw-r--r--drivers/scsi/constants.c6
-rw-r--r--drivers/scsi/hpsa.c6
-rw-r--r--drivers/scsi/osd/osd_initiator.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h20
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c30
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/sd.c8
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c10
-rw-r--r--drivers/serial/68328serial.c29
-rw-r--r--drivers/serial/8250_early.c4
-rw-r--r--drivers/serial/amba-pl010.c9
-rw-r--r--drivers/serial/bfin_sport_uart.c2
-rw-r--r--drivers/serial/mfd.c18
-rw-r--r--drivers/serial/mpc52xx_uart.c1
-rw-r--r--drivers/serial/mrst_max3110.c1
-rw-r--r--drivers/serial/serial_cs.c62
-rw-r--r--drivers/serial/sn_console.c2
-rw-r--r--drivers/spi/amba-pl022.c16
-rw-r--r--drivers/spi/dw_spi.c24
-rw-r--r--drivers/spi/spi.c14
-rw-r--r--drivers/spi/spi_gpio.c2
-rw-r--r--drivers/spi/spi_mpc8xxx.c10
-rw-r--r--drivers/spi/spi_s3c64xx.c37
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/batman-adv/bat_sysfs.c4
-rw-r--r--drivers/staging/batman-adv/hard-interface.c42
-rw-r--r--drivers/staging/batman-adv/icmp_socket.c12
-rw-r--r--drivers/staging/batman-adv/main.c7
-rw-r--r--drivers/staging/batman-adv/originator.c14
-rw-r--r--drivers/staging/batman-adv/routing.c16
-rw-r--r--drivers/staging/batman-adv/send.c8
-rw-r--r--drivers/staging/batman-adv/types.h1
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c1
-rw-r--r--drivers/staging/hv/netvsc_drv.c3
-rw-r--r--drivers/staging/hv/ring_buffer.c3
-rw-r--r--drivers/staging/hv/storvsc_api.h4
-rw-r--r--drivers/staging/hv/storvsc_drv.c11
-rw-r--r--drivers/staging/octeon/Kconfig2
-rw-r--r--drivers/staging/rt2860/usb_main_dev.c41
-rw-r--r--drivers/staging/sep/Kconfig10
-rw-r--r--drivers/staging/sep/Makefile2
-rw-r--r--drivers/staging/sep/TODO8
-rw-r--r--drivers/staging/sep/sep_dev.h110
-rw-r--r--drivers/staging/sep/sep_driver.c2742
-rw-r--r--drivers/staging/sep/sep_driver_api.h425
-rw-r--r--drivers/staging/sep/sep_driver_config.h225
-rw-r--r--drivers/staging/sep/sep_driver_hw_defs.h232
-rw-r--r--drivers/staging/spectra/Kconfig1
-rw-r--r--drivers/staging/spectra/ffsport.c30
-rw-r--r--drivers/staging/spectra/flash.c420
-rw-r--r--drivers/staging/ti-st/st.h1
-rw-r--r--drivers/staging/ti-st/st_core.c9
-rw-r--r--drivers/staging/ti-st/st_core.h2
-rw-r--r--drivers/staging/ti-st/st_kim.c22
-rw-r--r--drivers/staging/tm6000/Kconfig2
-rw-r--r--drivers/staging/tm6000/tm6000-input.c61
-rw-r--r--drivers/staging/vt6655/wpactl.c11
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c3
-rw-r--r--drivers/staging/zram/zram_drv.c1
-rw-r--r--drivers/usb/atm/cxacru.c24
-rw-r--r--drivers/usb/class/cdc-acm.c23
-rw-r--r--drivers/usb/core/Kconfig6
-rw-r--r--drivers/usb/core/file.c35
-rw-r--r--drivers/usb/core/message.c23
-rw-r--r--drivers/usb/gadget/composite.c4
-rw-r--r--drivers/usb/gadget/m66592-udc.c1
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c1
-rw-r--r--drivers/usb/gadget/rndis.c12
-rw-r--r--drivers/usb/gadget/rndis.h2
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c2
-rw-r--r--drivers/usb/gadget/uvc_v4l2.c2
-rw-r--r--drivers/usb/host/ehci-pci.c5
-rw-r--r--drivers/usb/host/ehci-ppc-of.c12
-rw-r--r--drivers/usb/host/isp1760-hcd.c2
-rw-r--r--drivers/usb/host/xhci-ring.c6
-rw-r--r--drivers/usb/misc/adutux.c2
-rw-r--r--drivers/usb/misc/iowarrior.c4
-rw-r--r--drivers/usb/musb/cppi_dma.c1
-rw-r--r--drivers/usb/musb/musb_debugfs.c5
-rw-r--r--drivers/usb/musb/musb_gadget.c75
-rw-r--r--drivers/usb/musb/musb_gadget.h2
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c9
-rw-r--r--drivers/usb/musb/musb_host.c6
-rw-r--r--drivers/usb/otg/twl4030-usb.c74
-rw-r--r--drivers/usb/serial/cp210x.c15
-rw-r--r--drivers/usb/serial/ftdi_sio.c15
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h21
-rw-r--r--drivers/usb/serial/generic.c11
-rw-r--r--drivers/usb/serial/io_ti.c4
-rw-r--r--drivers/usb/serial/mos7720.c3
-rw-r--r--drivers/usb/serial/mos7840.c35
-rw-r--r--drivers/usb/serial/navman.c1
-rw-r--r--drivers/usb/serial/option.c126
-rw-r--r--drivers/usb/serial/pl2303.c3
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/serial/ssu100.c258
-rw-r--r--drivers/usb/serial/usb-serial.c23
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/vhost.c92
-rw-r--r--drivers/video/console/fbcon.c5
-rw-r--r--drivers/video/efifb.c103
-rw-r--r--drivers/video/pxa168fb.c10
-rw-r--r--drivers/video/sis/sis_main.c3
-rw-r--r--drivers/video/via/ioctl.c2
-rw-r--r--drivers/watchdog/Kconfig6
-rw-r--r--drivers/watchdog/sb_wdog.c12
-rw-r--r--drivers/watchdog/ts72xx_wdt.c3
-rw-r--r--drivers/xen/events.c21
-rw-r--r--drivers/xen/manage.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c9
570 files changed, 8117 insertions, 8206 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index ae473445ad6d..a2aea53a75ed 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -50,7 +50,7 @@ obj-$(CONFIG_SPI) += spi/
 obj-y += net/
 obj-$(CONFIG_ATM) += atm/
 obj-$(CONFIG_FUSION) += message/
-obj-$(CONFIG_FIREWIRE) += firewire/
+obj-y += firewire/
 obj-y += ieee1394/
 obj-$(CONFIG_UIO) += uio/
 obj-y += cdrom/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b811f2173f6f..88681aca88c5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -105,7 +105,7 @@ config ACPI_EC_DEBUGFS
 
	  Be aware that using this interface can confuse your Embedded
	  Controller in a way that a normal reboot is not enough. You then
-	  have to power of your system, and remove the laptop battery for
+	  have to power off your system, and remove the laptop battery for
	  some seconds.
	  An Embedded Controller typically is available on laptops and reads
	  sensor values like battery state and temperature.
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index b76848c80be3..6b115f6c4313 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -382,31 +382,32 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device)
 	device_remove_file(&device->dev, &dev_attr_rrtime);
 }
 
-/* Query firmware how many CPUs should be idle */
-static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
+/*
+ * Query firmware how many CPUs should be idle
+ * return -1 on failure
+ */
+static int acpi_pad_pur(acpi_handle handle)
 {
 	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
 	union acpi_object *package;
-	int rev, num, ret = -EINVAL;
+	int num = -1;
 
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
-		return -EINVAL;
+		return num;
 
 	if (!buffer.length || !buffer.pointer)
-		return -EINVAL;
+		return num;
 
 	package = buffer.pointer;
-	if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
-		goto out;
-	rev = package->package.elements[0].integer.value;
-	num = package->package.elements[1].integer.value;
-	if (rev != 1 || num < 0)
-		goto out;
-	*num_cpus = num;
-	ret = 0;
-out:
+
+	if (package->type == ACPI_TYPE_PACKAGE &&
+		package->package.count == 2 &&
+		package->package.elements[0].integer.value == 1) /* rev 1 */
+
+		num = package->package.elements[1].integer.value;
+
 	kfree(buffer.pointer);
-	return ret;
+	return num;
 }
 
 /* Notify firmware how many CPUs are idle */
@@ -433,7 +434,8 @@ static void acpi_pad_handle_notify(acpi_handle handle)
 	uint32_t idle_cpus;
 
 	mutex_lock(&isolated_cpus_lock);
-	if (acpi_pad_pur(handle, &num_cpus)) {
+	num_cpus = acpi_pad_pur(handle);
+	if (num_cpus < 0) {
 		mutex_unlock(&isolated_cpus_lock);
 		return;
 	}
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index df85b53a674f..7dad9160f209 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -854,6 +854,7 @@ struct acpi_bit_register_info {
 	 ACPI_BITMASK_POWER_BUTTON_STATUS |   \
 	 ACPI_BITMASK_SLEEP_BUTTON_STATUS |   \
 	 ACPI_BITMASK_RT_CLOCK_STATUS |       \
+	 ACPI_BITMASK_PCIEXP_WAKE_DISABLE |   \
 	 ACPI_BITMASK_WAKE_STATUS)
 
 #define ACPI_BITMASK_TIMER_ENABLE          0x0001
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 74c24d517f81..4093522eed45 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void)
  *
  * DESCRIPTION: Reacquire the interpreter execution region from within the
  *              interpreter code. Failure to enter the interpreter region is a
- *              fatal system error. Used in conjuction with
+ *              fatal system error. Used in conjunction with
  *              relinquish_interpreter
  *
  ******************************************************************************/
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 22cfcfbd9fff..491191e6cf69 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -149,7 +149,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
 
 	/*
 	 * 16-, 32-, and 64-bit cases must use the move macros that perform
-	 * endian conversion and/or accomodate hardware that cannot perform
+	 * endian conversion and/or accommodate hardware that cannot perform
 	 * misaligned memory transfers
 	 */
 	case ACPI_RSC_MOVE16:
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 907e350f1c7d..fca34ccfd294 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -34,6 +34,6 @@ config ACPI_APEI_ERST_DEBUG
 	depends on ACPI_APEI
 	help
 	  ERST is a way provided by APEI to save and retrieve hardware
-	  error infomation to and from a persistent store. Enable this
+	  error information to and from a persistent store. Enable this
 	  if you want to debugging and testing the ERST kernel support
 	  and firmware implementation.
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 73fd0c7487c1..4a904a4bf05f 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -445,11 +445,15 @@ EXPORT_SYMBOL_GPL(apei_resources_sub);
 int apei_resources_request(struct apei_resources *resources,
 			   const char *desc)
 {
-	struct apei_res *res, *res_bak;
+	struct apei_res *res, *res_bak = NULL;
 	struct resource *r;
+	int rc;
 
-	apei_resources_sub(resources, &apei_resources_all);
+	rc = apei_resources_sub(resources, &apei_resources_all);
+	if (rc)
+		return rc;
 
+	rc = -EINVAL;
 	list_for_each_entry(res, &resources->iomem, list) {
 		r = request_mem_region(res->start, res->end - res->start,
 				       desc);
@@ -475,7 +479,11 @@ int apei_resources_request(struct apei_resources *resources,
 		}
 	}
 
-	apei_resources_merge(&apei_resources_all, resources);
+	rc = apei_resources_merge(&apei_resources_all, resources);
+	if (rc) {
+		pr_err(APEI_PFX "Fail to merge resources!\n");
+		goto err_unmap_ioport;
+	}
 
 	return 0;
 err_unmap_ioport:
@@ -491,12 +499,13 @@ err_unmap_iomem:
 			break;
 		release_mem_region(res->start, res->end - res->start);
 	}
-	return -EINVAL;
+	return rc;
 }
 EXPORT_SYMBOL_GPL(apei_resources_request);
 
 void apei_resources_release(struct apei_resources *resources)
 {
+	int rc;
 	struct apei_res *res;
 
 	list_for_each_entry(res, &resources->iomem, list)
@@ -504,7 +513,9 @@ void apei_resources_release(struct apei_resources *resources)
 	list_for_each_entry(res, &resources->ioport, list)
 		release_region(res->start, res->end - res->start);
 
-	apei_resources_sub(&apei_resources_all, resources);
+	rc = apei_resources_sub(&apei_resources_all, resources);
+	if (rc)
+		pr_err(APEI_PFX "Fail to sub resources!\n");
 }
 EXPORT_SYMBOL_GPL(apei_resources_release);
 
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 465c885938ee..cf29df69380b 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -426,7 +426,9 @@ DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
 
 static int einj_check_table(struct acpi_table_einj *einj_tab)
 {
-	if (einj_tab->header_length != sizeof(struct acpi_table_einj))
+	if ((einj_tab->header_length !=
+	     (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header)))
+	    && (einj_tab->header_length != sizeof(struct acpi_table_einj)))
 		return -EINVAL;
 	if (einj_tab->header.length < sizeof(struct acpi_table_einj))
 		return -EINVAL;
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 5281ddda2777..da1228a9a544 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -2,7 +2,7 @@
  * APEI Error Record Serialization Table debug support
  *
  * ERST is a way provided by APEI to save and retrieve hardware error
- * infomation to and from a persistent store. This file provide the
+ * information to and from a persistent store. This file provide the
  * debugging/testing support for ERST kernel support and firmware
  * implementation.
  *
@@ -111,11 +111,13 @@ retry:
 		goto out;
 	}
 	if (len > erst_dbg_buf_len) {
-		kfree(erst_dbg_buf);
+		void *p;
 		rc = -ENOMEM;
-		erst_dbg_buf = kmalloc(len, GFP_KERNEL);
-		if (!erst_dbg_buf)
+		p = kmalloc(len, GFP_KERNEL);
+		if (!p)
 			goto out;
+		kfree(erst_dbg_buf);
+		erst_dbg_buf = p;
 		erst_dbg_buf_len = len;
 		goto retry;
 	}
@@ -150,11 +152,13 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
 	if (mutex_lock_interruptible(&erst_dbg_mutex))
 		return -EINTR;
 	if (usize > erst_dbg_buf_len) {
-		kfree(erst_dbg_buf);
+		void *p;
 		rc = -ENOMEM;
-		erst_dbg_buf = kmalloc(usize, GFP_KERNEL);
-		if (!erst_dbg_buf)
+		p = kmalloc(usize, GFP_KERNEL);
+		if (!p)
 			goto out;
+		kfree(erst_dbg_buf);
+		erst_dbg_buf = p;
 		erst_dbg_buf_len = usize;
 	}
 	rc = copy_from_user(erst_dbg_buf, ubuf, usize);
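
The two erst-dbg.c hunks above replace a free-then-allocate sequence with allocate-first, free-and-swap-second, so a failed kmalloc() no longer leaves erst_dbg_buf dangling while erst_dbg_buf_len still claims the old size. A minimal sketch of the same pattern as a standalone helper (the helper name and signature are illustrative, not part of the patch):

	/* Grow *bufp to at least len bytes; on failure the old buffer stays valid. */
	static int grow_buf(void **bufp, size_t *buf_len, size_t len)
	{
		void *p;

		if (len <= *buf_len)
			return 0;
		p = kmalloc(len, GFP_KERNEL);	/* allocate the new buffer first */
		if (!p)
			return -ENOMEM;		/* *bufp and *buf_len are untouched */
		kfree(*bufp);			/* free only after the new one exists */
		*bufp = p;
		*buf_len = len;
		return 0;
	}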
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 18645f4e83cd..1211c03149e8 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -2,7 +2,7 @@
  * APEI Error Record Serialization Table support
  *
  * ERST is a way provided by APEI to save and retrieve hardware error
- * infomation to and from a persistent store.
+ * information to and from a persistent store.
  *
  * For more information about ERST, please refer to ACPI Specification
  * version 4.0, section 17.4.
@@ -266,13 +266,30 @@ static int erst_exec_move_data(struct apei_exec_context *ctx,
 {
 	int rc;
 	u64 offset;
+	void *src, *dst;
+
+	/* ioremap does not work in interrupt context */
+	if (in_interrupt()) {
+		pr_warning(ERST_PFX
+			   "MOVE_DATA can not be used in interrupt context");
+		return -EBUSY;
+	}
 
 	rc = __apei_exec_read_register(entry, &offset);
 	if (rc)
 		return rc;
-	memmove((void *)ctx->dst_base + offset,
-		(void *)ctx->src_base + offset,
-		ctx->var2);
+
+	src = ioremap(ctx->src_base + offset, ctx->var2);
+	if (!src)
+		return -ENOMEM;
+	dst = ioremap(ctx->dst_base + offset, ctx->var2);
+	if (!dst)
+		return -ENOMEM;
+
+	memmove(dst, src, ctx->var2);
+
+	iounmap(src);
+	iounmap(dst);
 
 	return 0;
 }
@@ -750,7 +767,9 @@ __setup("erst_disable", setup_erst_disable);
 
 static int erst_check_table(struct acpi_table_erst *erst_tab)
 {
-	if (erst_tab->header_length != sizeof(struct acpi_table_erst))
+	if ((erst_tab->header_length !=
+	     (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
+	    && (erst_tab->header_length != sizeof(struct acpi_table_einj)))
 		return -EINVAL;
 	if (erst_tab->header.length < sizeof(struct acpi_table_erst))
 		return -EINVAL;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 385a6059714a..0d505e59214d 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -302,7 +302,7 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
 	struct ghes *ghes = NULL;
 	int rc = -EINVAL;
 
-	generic = ghes_dev->dev.platform_data;
+	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
 	if (!generic->enabled)
 		return -ENODEV;
 
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 343168d18266..1a3508a7fe03 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -137,20 +137,23 @@ static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data)
 
 static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
 {
-	struct acpi_hest_generic *generic;
 	struct platform_device *ghes_dev;
 	struct ghes_arr *ghes_arr = data;
 	int rc;
 
 	if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
 		return 0;
-	generic = (struct acpi_hest_generic *)hest_hdr;
-	if (!generic->enabled)
+
+	if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
 		return 0;
 	ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
 	if (!ghes_dev)
 		return -ENOMEM;
-	ghes_dev->dev.platform_data = generic;
+
+	rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *));
+	if (rc)
+		goto err;
+
 	rc = platform_device_add(ghes_dev);
 	if (rc)
 		goto err;
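
The hest.c hunk above stops stashing a raw pointer in dev.platform_data and instead goes through platform_device_add_data(), which copies the bytes it is handed. What gets copied here is the pointer value itself (&hest_hdr, sizeof(void *)), so the consumer must treat platform_data as a pointer to that copy, which is exactly what the ghes.c hunk earlier in this diff does. A hedged sketch of the two sides of that convention (the function names below are illustrative only):

	/* Producer: store a copy of the pointer in pdev->dev.platform_data. */
	static int example_attach(struct platform_device *pdev,
				  struct acpi_hest_generic *generic)
	{
		return platform_device_add_data(pdev, &generic, sizeof(generic));
	}

	/* Consumer: platform_data holds a copy of the pointer, not the struct,
	 * so dereference one extra level. */
	static struct acpi_hest_generic *example_get(struct platform_device *pdev)
	{
		return *(struct acpi_hest_generic **)pdev->dev.platform_data;
	}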
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index 8f8bd736d4ff..542e53903891 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -142,7 +142,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
 	list_add_tail_rcu(&map->list, &acpi_iomaps);
 	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
 
-	return vaddr + (paddr - pg_off);
+	return map->vaddr + (paddr - map->paddr);
 err_unmap:
 	iounmap(vaddr);
 	return NULL;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index dc58402b0a17..98417201e9ce 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -273,7 +273,6 @@ static enum power_supply_property energy_battery_props[] = {
 	POWER_SUPPLY_PROP_CYCLE_COUNT,
 	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
-	POWER_SUPPLY_PROP_CURRENT_NOW,
 	POWER_SUPPLY_PROP_POWER_NOW,
 	POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
 	POWER_SUPPLY_PROP_ENERGY_FULL,
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 2bb28b9d91c4..af308d03f492 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -183,6 +183,8 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
 {
 	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
 	acpi_osi_setup("!Windows 2006");
+	acpi_osi_setup("!Windows 2006 SP1");
+	acpi_osi_setup("!Windows 2006 SP2");
 	return 0;
 }
 static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
@@ -202,6 +204,23 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 	},
 	},
 	{
+	/*
+	 * There have a NVIF method in MSI GX723 DSDT need call by Nvidia
+	 * driver (e.g. nouveau) when user press brightness hotkey.
+	 * Currently, nouveau driver didn't do the job and it causes there
+	 * have a infinite while loop in DSDT when user press hotkey.
+	 * We add MSI GX723's dmi information to this table for workaround
+	 * this issue.
+	 * Will remove MSI GX723 from the table after nouveau grows support.
+	 */
+	.callback = dmi_disable_osi_vista,
+	.ident = "MSI GX723",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "GX723"),
+		},
+	},
+	{
 	.callback = dmi_disable_osi_vista,
 	.ident = "Sony VGN-NS10J_S",
 	.matches = {
@@ -226,6 +245,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 	},
 	},
 	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Toshiba Satellite L355",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+		     DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
+		},
+	},
+	{
 	.callback = dmi_disable_osi_win7,
 	.ident = "ASUS K50IJ",
 	.matches = {
@@ -233,6 +260,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 		     DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
 		},
 	},
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Toshiba P305D",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
+		},
+	},
 
 	/*
 	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 5c221ab535d5..310e3b9749cb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(acpi_root_dir);
 static int set_power_nocheck(const struct dmi_system_id *id)
 {
 	printk(KERN_NOTICE PREFIX "%s detected - "
-		"disable power check in power transistion\n", id->ident);
+		"disable power check in power transition\n", id->ident);
 	acpi_power_nocheck = 1;
 	return 0;
 }
@@ -80,23 +80,15 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
 
 static struct dmi_system_id dsdt_dmi_table[] __initdata = {
 	/*
-	 * Insyde BIOS on some TOSHIBA machines corrupt the DSDT.
+	 * Invoke DSDT corruption work-around on all Toshiba Satellite.
 	 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
 	 */
 	{
 		.callback = set_copy_dsdt,
-		.ident = "TOSHIBA Satellite A505",
+		.ident = "TOSHIBA Satellite",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
-		},
-	},
-	{
-		.callback = set_copy_dsdt,
-		.ident = "TOSHIBA Satellite L505D",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
 		},
 	},
 	{}
@@ -1027,7 +1019,7 @@ static int __init acpi_init(void)
 
 	/*
 	 * If the laptop falls into the DMI check table, the power state check
-	 * will be disabled in the course of device power transistion.
+	 * will be disabled in the course of device power transition.
 	 */
 	dmi_check_system(power_nocheck_dmi_table);
 
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 8a3b840c0bb2..d94d2953c974 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -369,7 +369,9 @@ static void __exit acpi_fan_exit(void)
 
 	acpi_bus_unregister_driver(&acpi_fan_driver);
 
+#ifdef CONFIG_ACPI_PROCFS
 	remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
+#endif
 
 	return;
 }
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 1f67057af2a5..3ba8d1f44a73 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,7 +33,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/pci.h>
 #include <linux/pci-acpi.h>
-#include <linux/pci-aspm.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <acpi/acpi_bus.h>
@@ -226,22 +225,31 @@ static acpi_status acpi_pci_run_osc(acpi_handle handle,
 	return status;
 }
 
-static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags)
+static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
+				      u32 support,
+				      u32 *control)
 {
 	acpi_status status;
-	u32 support_set, result, capbuf[3];
+	u32 result, capbuf[3];
+
+	support &= OSC_PCI_SUPPORT_MASKS;
+	support |= root->osc_support_set;
 
-	/* do _OSC query for all possible controls */
-	support_set = root->osc_support_set | (flags & OSC_PCI_SUPPORT_MASKS);
 	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-	capbuf[OSC_SUPPORT_TYPE] = support_set;
-	capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+	capbuf[OSC_SUPPORT_TYPE] = support;
+	if (control) {
+		*control &= OSC_PCI_CONTROL_MASKS;
+		capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+	} else {
+		/* Run _OSC query for all possible controls. */
+		capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+	}
 
 	status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
 	if (ACPI_SUCCESS(status)) {
-		root->osc_support_set = support_set;
-		root->osc_control_qry = result;
-		root->osc_queried = 1;
+		root->osc_support_set = support;
+		if (control)
+			*control = result;
 	}
 	return status;
 }
@@ -255,7 +263,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
 	if (ACPI_FAILURE(status))
 		return status;
 	mutex_lock(&osc_lock);
-	status = acpi_pci_query_osc(root, flags);
+	status = acpi_pci_query_osc(root, flags, NULL);
 	mutex_unlock(&osc_lock);
 	return status;
 }
@@ -365,55 +373,70 @@ out:
 EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
 
 /**
- * acpi_pci_osc_control_set - commit requested control to Firmware
- * @handle: acpi_handle for the target ACPI object
- * @flags: driver's requested control bits
+ * acpi_pci_osc_control_set - Request control of PCI root _OSC features.
+ * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
+ * @mask: Mask of _OSC bits to request control of, place to store control mask.
+ * @req: Mask of _OSC bits the control of is essential to the caller.
+ *
+ * Run _OSC query for @mask and if that is successful, compare the returned
+ * mask of control bits with @req. If all of the @req bits are set in the
+ * returned mask, run _OSC request for it.
  *
- * Attempt to take control from Firmware on requested control bits.
+ * The variable at the @mask address may be modified regardless of whether or
+ * not the function returns success. On success it will contain the mask of
+ * _OSC bits the BIOS has granted control of, but its contents are meaningless
+ * on failure.
  **/
-acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags)
+acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
 {
+	struct acpi_pci_root *root;
 	acpi_status status;
-	u32 control_req, result, capbuf[3];
+	u32 ctrl, capbuf[3];
 	acpi_handle tmp;
-	struct acpi_pci_root *root;
 
-	status = acpi_get_handle(handle, "_OSC", &tmp);
-	if (ACPI_FAILURE(status))
-		return status;
+	if (!mask)
+		return AE_BAD_PARAMETER;
 
-	control_req = (flags & OSC_PCI_CONTROL_MASKS);
-	if (!control_req)
+	ctrl = *mask & OSC_PCI_CONTROL_MASKS;
+	if ((ctrl & req) != req)
 		return AE_TYPE;
 
 	root = acpi_pci_find_root(handle);
 	if (!root)
 		return AE_NOT_EXIST;
 
+	status = acpi_get_handle(handle, "_OSC", &tmp);
+	if (ACPI_FAILURE(status))
+		return status;
+
 	mutex_lock(&osc_lock);
+
+	*mask = ctrl | root->osc_control_set;
 	/* No need to evaluate _OSC if the control was already granted. */
-	if ((root->osc_control_set & control_req) == control_req)
+	if ((root->osc_control_set & ctrl) == ctrl)
 		goto out;
 
-	/* Need to query controls first before requesting them */
-	if (!root->osc_queried) {
-		status = acpi_pci_query_osc(root, root->osc_support_set);
+	/* Need to check the available controls bits before requesting them. */
+	while (*mask) {
+		status = acpi_pci_query_osc(root, root->osc_support_set, mask);
 		if (ACPI_FAILURE(status))
 			goto out;
+		if (ctrl == *mask)
+			break;
+		ctrl = *mask;
 	}
-	if ((root->osc_control_qry & control_req) != control_req) {
-		printk(KERN_DEBUG
-		       "Firmware did not grant requested _OSC control\n");
+
+	if ((ctrl & req) != req) {
 		status = AE_SUPPORT;
 		goto out;
 	}
 
 	capbuf[OSC_QUERY_TYPE] = 0;
 	capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
-	capbuf[OSC_CONTROL_TYPE] = root->osc_control_set | control_req;
-	status = acpi_pci_run_osc(handle, capbuf, &result);
+	capbuf[OSC_CONTROL_TYPE] = ctrl;
+	status = acpi_pci_run_osc(handle, capbuf, mask);
 	if (ACPI_SUCCESS(status))
-		root->osc_control_set = result;
+		root->osc_control_set = *mask;
 out:
 	mutex_unlock(&osc_lock);
 	return status;
@@ -544,14 +567,6 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
 	if (flags != base_flags)
 		acpi_pci_osc_support(root, flags);
 
-	status = acpi_pci_osc_control_set(root->device->handle,
-					OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n");
-		pcie_no_aspm();
-	}
-
 	pci_acpi_add_bus_pm_notifier(device, root->bus);
 	if (device->wakeup.flags.run_wake)
 		device_set_run_wake(root->bus->bridge, true);
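
With the pci_root.c rework above, acpi_pci_osc_control_set() no longer takes a plain flags word: the caller passes the requested control bits by reference in @mask and names the bits it cannot do without in @req; on success @mask comes back holding everything firmware actually granted. A hedged sketch of a caller of the new interface (the surrounding function is illustrative only; the OSC_* control flags are the existing definitions from <linux/acpi.h>):

	/* Illustrative caller only -- not part of this patch. */
	static int example_request_osc_control(acpi_handle handle)
	{
		acpi_status status;
		u32 flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL |
			    OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;

		/* Ask for both bits, but treat only the capability structure
		 * control as mandatory. */
		status = acpi_pci_osc_control_set(handle, &flags,
					OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
		if (ACPI_FAILURE(status))
			return -ENODEV;

		/* flags now holds every control bit firmware granted. */
		return (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL) ? 1 : 0;
	}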
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e9699aaed109..bec561c14beb 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -29,12 +29,6 @@ static int set_no_mwait(const struct dmi_system_id *id)
 
 static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
 	{
-	set_no_mwait, "IFL91 board", {
-	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
-	DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
-	DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
-	DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
-	{
 	set_no_mwait, "Extensa 5220", {
 	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
 	DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -352,4 +346,5 @@ void __init acpi_early_processor_set_pdc(void)
 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
 			    ACPI_UINT32_MAX,
 			    early_init_pdc, NULL, NULL, NULL);
+	acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL);
 }
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 156021892389..347eb21b2353 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -850,7 +850,7 @@ static int __init acpi_processor_init(void)
 		printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
 		       acpi_idle_driver.name);
 	} else {
-		printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
+		printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
 		       cpuidle_get_driver()->name);
 	}
 
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index ba1bd263d903..3a73a93596e8 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -447,8 +447,8 @@ int acpi_processor_notify_smm(struct module *calling_module)
447 if (!try_module_get(calling_module)) 447 if (!try_module_get(calling_module))
448 return -EINVAL; 448 return -EINVAL;
449 449
450 /* is_done is set to negative if an error occured, 450 /* is_done is set to negative if an error occurred,
 451 * and to positive if _no_ error occured, but SMM 451 * and to positive if _no_ error occurred, but SMM
452 * was already notified. This avoids double notification 452 * was already notified. This avoids double notification
453 * which might lead to unexpected results... 453 * which might lead to unexpected results...
454 */ 454 */
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index cf82989ae756..4754ff6e70e6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -363,6 +363,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
363 return 0; 363 return 0;
364} 364}
365 365
366static int __init init_nvs_nosave(const struct dmi_system_id *d)
367{
368 acpi_nvs_nosave();
369 return 0;
370}
371
366static struct dmi_system_id __initdata acpisleep_dmi_table[] = { 372static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
367 { 373 {
368 .callback = init_old_suspend_ordering, 374 .callback = init_old_suspend_ordering,
@@ -397,6 +403,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
397 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), 403 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
398 }, 404 },
399 }, 405 },
406 {
407 .callback = init_nvs_nosave,
408 .ident = "Sony Vaio VGN-SR11M",
409 .matches = {
410 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
411 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
412 },
413 },
414 {
415 .callback = init_nvs_nosave,
416 .ident = "Everex StepNote Series",
417 .matches = {
418 DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
419 DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
420 },
421 },
400 {}, 422 {},
401}; 423};
402#endif /* CONFIG_SUSPEND */ 424#endif /* CONFIG_SUSPEND */
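Further machines that need the ACPI NVS region skipped across suspend follow the same table layout. A minimal sketch of such an entry, using the init_nvs_nosave() callback added above but with an invented vendor and product name:

/* Sketch only: layout of an additional NVS-nosave quirk entry. */
static struct dmi_system_id __initdata example_sleep_dmi_table[] = {
	{
	.callback = init_nvs_nosave,
	.ident = "Example Laptop",	/* invented machine, for illustration */
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
		DMI_MATCH(DMI_PRODUCT_NAME, "Example 1000"),
		},
	},
	{},
};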
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 68e2e4582fa2..f8588f81048a 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -100,7 +100,7 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
100 ACPI_DEBUG_INIT(ACPI_LV_EVENTS), 100 ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
101}; 101};
102 102
103static int param_get_debug_layer(char *buffer, struct kernel_param *kp) 103static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
104{ 104{
105 int result = 0; 105 int result = 0;
106 int i; 106 int i;
@@ -128,7 +128,7 @@ static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
128 return result; 128 return result;
129} 129}
130 130
131static int param_get_debug_level(char *buffer, struct kernel_param *kp) 131static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
132{ 132{
133 int result = 0; 133 int result = 0;
134 int i; 134 int i;
@@ -149,10 +149,18 @@ static int param_get_debug_level(char *buffer, struct kernel_param *kp)
149 return result; 149 return result;
150} 150}
151 151
152module_param_call(debug_layer, param_set_uint, param_get_debug_layer, 152static struct kernel_param_ops param_ops_debug_layer = {
153 &acpi_dbg_layer, 0644); 153 .set = param_set_uint,
154module_param_call(debug_level, param_set_uint, param_get_debug_level, 154 .get = param_get_debug_layer,
155 &acpi_dbg_level, 0644); 155};
156
157static struct kernel_param_ops param_ops_debug_level = {
158 .set = param_set_uint,
159 .get = param_get_debug_level,
160};
161
162module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
163module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
156 164
157static char trace_method_name[6]; 165static char trace_method_name[6];
158module_param_string(trace_method_name, trace_method_name, 6, 0644); 166module_param_string(trace_method_name, trace_method_name, 6, 0644);
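The hunk above replaces the module_param_call() interface with module_param_cb() plus a kernel_param_ops structure. A minimal sketch of the same pattern for a hypothetical parameter (example_mask and example_get_mask are invented names; param_set_uint and module_param_cb are the stock kernel APIs):

#include <linux/moduleparam.h>

static unsigned int example_mask;	/* hypothetical backing variable */

static int example_get_mask(char *buffer, const struct kernel_param *kp)
{
	/* custom read-out; writes go through the stock uint parser below */
	return sprintf(buffer, "0x%08x", *(unsigned int *)kp->arg);
}

static struct kernel_param_ops example_mask_ops = {
	.set = param_set_uint,
	.get = example_get_mask,
};

module_param_cb(example_mask, &example_mask_ops, &example_mask, 0644);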
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index c5fef01b3c95..b83676126598 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -59,8 +59,8 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
59 "support\n")); 59 "support\n"));
60 *cap |= ACPI_VIDEO_BACKLIGHT; 60 *cap |= ACPI_VIDEO_BACKLIGHT;
61 if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy))) 61 if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy)))
62 printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness " 62 printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
63 "control misses _BQC function\n"); 63 "cannot determine initial brightness\n");
64 /* We have backlight support, no need to scan further */ 64 /* We have backlight support, no need to scan further */
65 return AE_CTRL_TERMINATE; 65 return AE_CTRL_TERMINATE;
66 } 66 }
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 65e3e2708371..11ec911016c6 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -828,6 +828,7 @@ config PATA_SAMSUNG_CF
828config PATA_WINBOND_VLB 828config PATA_WINBOND_VLB
829 tristate "Winbond W83759A VLB PATA support (Experimental)" 829 tristate "Winbond W83759A VLB PATA support (Experimental)"
830 depends on ISA && EXPERIMENTAL 830 depends on ISA && EXPERIMENTAL
831 select PATA_LEGACY
831 help 832 help
832 Support for the Winbond W83759A controller on Vesa Local Bus 833 Support for the Winbond W83759A controller on Vesa Local Bus
833 systems. 834 systems.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 158eaa961b1e..d5df04a395ca 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -89,7 +89,6 @@ obj-$(CONFIG_PATA_QDI) += pata_qdi.o
89obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o 89obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
90obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o 90obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
91obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o 91obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o
92obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
93 92
94obj-$(CONFIG_PATA_PXA) += pata_pxa.o 93obj-$(CONFIG_PATA_PXA) += pata_pxa.o
95 94
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index fe75d8befc3a..99d0e5a51148 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -60,6 +60,7 @@ enum board_ids {
60 board_ahci, 60 board_ahci,
61 board_ahci_ign_iferr, 61 board_ahci_ign_iferr,
62 board_ahci_nosntf, 62 board_ahci_nosntf,
63 board_ahci_yes_fbs,
63 64
64 /* board IDs for specific chipsets in alphabetical order */ 65 /* board IDs for specific chipsets in alphabetical order */
65 board_ahci_mcp65, 66 board_ahci_mcp65,
@@ -89,6 +90,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
89static int ahci_pci_device_resume(struct pci_dev *pdev); 90static int ahci_pci_device_resume(struct pci_dev *pdev);
90#endif 91#endif
91 92
93static struct scsi_host_template ahci_sht = {
94 AHCI_SHT("ahci"),
95};
96
92static struct ata_port_operations ahci_vt8251_ops = { 97static struct ata_port_operations ahci_vt8251_ops = {
93 .inherits = &ahci_ops, 98 .inherits = &ahci_ops,
94 .hardreset = ahci_vt8251_hardreset, 99 .hardreset = ahci_vt8251_hardreset,
@@ -132,6 +137,14 @@ static const struct ata_port_info ahci_port_info[] = {
132 .udma_mask = ATA_UDMA6, 137 .udma_mask = ATA_UDMA6,
133 .port_ops = &ahci_ops, 138 .port_ops = &ahci_ops,
134 }, 139 },
140 [board_ahci_yes_fbs] =
141 {
142 AHCI_HFLAGS (AHCI_HFLAG_YES_FBS),
143 .flags = AHCI_FLAG_COMMON,
144 .pio_mask = ATA_PIO4,
145 .udma_mask = ATA_UDMA6,
146 .port_ops = &ahci_ops,
147 },
135 /* by chipsets */ 148 /* by chipsets */
136 [board_ahci_mcp65] = 149 [board_ahci_mcp65] =
137 { 150 {
@@ -244,6 +257,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
244 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ 257 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
245 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ 258 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
246 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ 259 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
260 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
261 { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
262 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
247 263
248 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 264 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
249 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 265 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -362,6 +378,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
362 /* Marvell */ 378 /* Marvell */
363 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ 379 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
364 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ 380 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
381 { PCI_DEVICE(0x1b4b, 0x9123),
382 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
365 383
366 /* Promise */ 384 /* Promise */
367 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ 385 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
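Two matching styles appear in the ID table above: PCI_VDEVICE() for vendors with a named macro, and a bare PCI_DEVICE() initializer carrying explicit driver_data, as used for the Marvell 88se9128. A sketch of the latter form in isolation (the array name is invented; the IDs and board value are the ones from the table above):

/* Sketch only: raw vendor/device match that forces the FBS-capable board type. */
static const struct pci_device_id example_ahci_ids[] = {
	{ PCI_DEVICE(0x1b4b, 0x9123),
	  .driver_data = board_ahci_yes_fbs },	/* 88se9128 */
	{ }	/* terminate list */
};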
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 7113c5724471..e5fdeebf9ef0 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -209,6 +209,7 @@ enum {
209 link offline */ 209 link offline */
210 AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ 210 AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
211 AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */ 211 AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
212 AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */
212 213
213 /* ap->flags bits */ 214 /* ap->flags bits */
214 215
@@ -297,7 +298,17 @@ struct ahci_host_priv {
297 298
298extern int ahci_ignore_sss; 299extern int ahci_ignore_sss;
299 300
300extern struct scsi_host_template ahci_sht; 301extern struct device_attribute *ahci_shost_attrs[];
302extern struct device_attribute *ahci_sdev_attrs[];
303
304#define AHCI_SHT(drv_name) \
305 ATA_NCQ_SHT(drv_name), \
306 .can_queue = AHCI_MAX_CMDS - 1, \
307 .sg_tablesize = AHCI_MAX_SG, \
308 .dma_boundary = AHCI_DMA_BOUNDARY, \
309 .shost_attrs = ahci_shost_attrs, \
310 .sdev_attrs = ahci_sdev_attrs
311
301extern struct ata_port_operations ahci_ops; 312extern struct ata_port_operations ahci_ops;
302 313
303void ahci_save_initial_config(struct device *dev, 314void ahci_save_initial_config(struct device *dev,
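With the shared ahci_sht gone from libahci, each glue driver builds its own SCSI host template from the AHCI_SHT() macro, as ahci.c and ahci_platform.c do in this patch. A hypothetical additional glue driver (name invented) would need nothing more than:

#include <scsi/scsi_host.h>
#include "ahci.h"

/* Sketch only: per-driver host template expanded from the AHCI_SHT() macro. */
static struct scsi_host_template example_ahci_sht = {
	AHCI_SHT("example_ahci"),
};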
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 4e97f33cca44..84b643270e7a 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -23,6 +23,10 @@
23#include <linux/ahci_platform.h> 23#include <linux/ahci_platform.h>
24#include "ahci.h" 24#include "ahci.h"
25 25
26static struct scsi_host_template ahci_platform_sht = {
27 AHCI_SHT("ahci_platform"),
28};
29
26static int __init ahci_probe(struct platform_device *pdev) 30static int __init ahci_probe(struct platform_device *pdev)
27{ 31{
28 struct device *dev = &pdev->dev; 32 struct device *dev = &pdev->dev;
@@ -145,7 +149,7 @@ static int __init ahci_probe(struct platform_device *pdev)
145 ahci_print_info(host, "platform"); 149 ahci_print_info(host, "platform");
146 150
147 rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, 151 rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
148 &ahci_sht); 152 &ahci_platform_sht);
149 if (rc) 153 if (rc)
150 goto err0; 154 goto err0;
151 155
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3971bc0a4838..d712675d0a96 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -302,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = {
302 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 302 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
303 /* SATA Controller IDE (CPT) */ 303 /* SATA Controller IDE (CPT) */
304 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 304 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
305 /* SATA Controller IDE (PBG) */
306 { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
307 /* SATA Controller IDE (PBG) */
308 { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
305 { } /* terminate list */ 309 { } /* terminate list */
306}; 310};
307 311
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 81e772a94d59..8eea309ea212 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -121,7 +121,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
121static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, 121static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
122 ahci_read_em_buffer, ahci_store_em_buffer); 122 ahci_read_em_buffer, ahci_store_em_buffer);
123 123
124static struct device_attribute *ahci_shost_attrs[] = { 124struct device_attribute *ahci_shost_attrs[] = {
125 &dev_attr_link_power_management_policy, 125 &dev_attr_link_power_management_policy,
126 &dev_attr_em_message_type, 126 &dev_attr_em_message_type,
127 &dev_attr_em_message, 127 &dev_attr_em_message,
@@ -132,22 +132,14 @@ static struct device_attribute *ahci_shost_attrs[] = {
132 &dev_attr_em_buffer, 132 &dev_attr_em_buffer,
133 NULL 133 NULL
134}; 134};
135EXPORT_SYMBOL_GPL(ahci_shost_attrs);
135 136
136static struct device_attribute *ahci_sdev_attrs[] = { 137struct device_attribute *ahci_sdev_attrs[] = {
137 &dev_attr_sw_activity, 138 &dev_attr_sw_activity,
138 &dev_attr_unload_heads, 139 &dev_attr_unload_heads,
139 NULL 140 NULL
140}; 141};
141 142EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
142struct scsi_host_template ahci_sht = {
143 ATA_NCQ_SHT("ahci"),
144 .can_queue = AHCI_MAX_CMDS - 1,
145 .sg_tablesize = AHCI_MAX_SG,
146 .dma_boundary = AHCI_DMA_BOUNDARY,
147 .shost_attrs = ahci_shost_attrs,
148 .sdev_attrs = ahci_sdev_attrs,
149};
150EXPORT_SYMBOL_GPL(ahci_sht);
151 143
152struct ata_port_operations ahci_ops = { 144struct ata_port_operations ahci_ops = {
153 .inherits = &sata_pmp_port_ops, 145 .inherits = &sata_pmp_port_ops,
@@ -430,6 +422,12 @@ void ahci_save_initial_config(struct device *dev,
430 cap &= ~HOST_CAP_SNTF; 422 cap &= ~HOST_CAP_SNTF;
431 } 423 }
432 424
425 if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
426 dev_printk(KERN_INFO, dev,
427 "controller can do FBS, turning on CAP_FBS\n");
428 cap |= HOST_CAP_FBS;
429 }
430
433 if (force_port_map && port_map != force_port_map) { 431 if (force_port_map && port_map != force_port_map) {
434 dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n", 432 dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
435 port_map, force_port_map); 433 port_map, force_port_map);
@@ -1320,7 +1318,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1320 /* issue the first D2H Register FIS */ 1318 /* issue the first D2H Register FIS */
1321 msecs = 0; 1319 msecs = 0;
1322 now = jiffies; 1320 now = jiffies;
1323 if (time_after(now, deadline)) 1321 if (time_after(deadline, now))
1324 msecs = jiffies_to_msecs(deadline - now); 1322 msecs = jiffies_to_msecs(deadline - now);
1325 1323
1326 tf.ctl |= ATA_SRST; 1324 tf.ctl |= ATA_SRST;
@@ -2036,9 +2034,15 @@ static int ahci_port_start(struct ata_port *ap)
2036 u32 cmd = readl(port_mmio + PORT_CMD); 2034 u32 cmd = readl(port_mmio + PORT_CMD);
2037 if (cmd & PORT_CMD_FBSCP) 2035 if (cmd & PORT_CMD_FBSCP)
2038 pp->fbs_supported = true; 2036 pp->fbs_supported = true;
2039 else 2037 else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
2038 dev_printk(KERN_INFO, dev,
2039 "port %d can do FBS, forcing FBSCP\n",
2040 ap->port_no);
2041 pp->fbs_supported = true;
2042 } else
2040 dev_printk(KERN_WARNING, dev, 2043 dev_printk(KERN_WARNING, dev,
2041 "The port is not capable of FBS\n"); 2044 "port %d is not capable of FBS\n",
2045 ap->port_no);
2042 } 2046 }
2043 2047
2044 if (pp->fbs_supported) { 2048 if (pp->fbs_supported) {
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 7ef7c4f216fa..932eaee50245 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5111,15 +5111,18 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
5111 qc->flags |= ATA_QCFLAG_ACTIVE; 5111 qc->flags |= ATA_QCFLAG_ACTIVE;
5112 ap->qc_active |= 1 << qc->tag; 5112 ap->qc_active |= 1 << qc->tag;
5113 5113
5114 /* We guarantee to LLDs that they will have at least one 5114 /*
5115 * We guarantee to LLDs that they will have at least one
5115 * non-zero sg if the command is a data command. 5116 * non-zero sg if the command is a data command.
5116 */ 5117 */
5117 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)); 5118 if (WARN_ON_ONCE(ata_is_data(prot) &&
5119 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5120 goto sys_err;
5118 5121
5119 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5122 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5120 (ap->flags & ATA_FLAG_PIO_DMA))) 5123 (ap->flags & ATA_FLAG_PIO_DMA)))
5121 if (ata_sg_setup(qc)) 5124 if (ata_sg_setup(qc))
5122 goto sg_err; 5125 goto sys_err;
5123 5126
5124 /* if device is sleeping, schedule reset and abort the link */ 5127 /* if device is sleeping, schedule reset and abort the link */
5125 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5128 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
@@ -5136,7 +5139,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
5136 goto err; 5139 goto err;
5137 return; 5140 return;
5138 5141
5139sg_err: 5142sys_err:
5140 qc->err_mask |= AC_ERR_SYSTEM; 5143 qc->err_mask |= AC_ERR_SYSTEM;
5141err: 5144err:
5142 ata_qc_complete(qc); 5145 ata_qc_complete(qc);
@@ -5415,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5415 */ 5418 */
5416int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5419int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5417{ 5420{
5421 unsigned int ehi_flags = ATA_EHI_QUIET;
5418 int rc; 5422 int rc;
5419 5423
5420 /* 5424 /*
@@ -5423,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5423 */ 5427 */
5424 ata_lpm_enable(host); 5428 ata_lpm_enable(host);
5425 5429
5426 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); 5430 /*
5431 * On some hardware, device fails to respond after spun down
5432 * for suspend. As the device won't be used before being
5433 * resumed, we don't need to touch the device. Ask EH to skip
5434 * the usual stuff and proceed directly to suspend.
5435 *
5436 * http://thread.gmane.org/gmane.linux.ide/46764
5437 */
5438 if (mesg.event == PM_EVENT_SUSPEND)
5439 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5440
5441 rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
5427 if (rc == 0) 5442 if (rc == 0)
5428 host->dev->power.power_state = mesg; 5443 host->dev->power.power_state = mesg;
5429 return rc; 5444 return rc;
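The new ehi_flags handling only matters to callers of ata_host_suspend(). A rough sketch of how a PCI ATA driver typically reaches it from its suspend hook (the function name is invented; dev_get_drvdata() and ata_pci_device_do_suspend() are the standard helpers):

/* Sketch only: PCI suspend path feeding into ata_host_suspend(). */
static int example_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	/* For PM_EVENT_SUSPEND, libata now asks EH to skip autopsy and
	 * recovery before proceeding straight to the suspend request. */
	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);
	return 0;
}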
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c9ae299b8342..e48302eae55f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3235,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link)
3235 if (link->flags & ATA_LFLAG_DISABLED) 3235 if (link->flags & ATA_LFLAG_DISABLED)
3236 return 1; 3236 return 1;
3237 3237
3238 /* skip if explicitly requested */
3239 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3240 return 1;
3241
3238 /* thaw frozen port and recover failed devices */ 3242 /* thaw frozen port and recover failed devices */
3239 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3243 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3240 return 0; 3244 return 0;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 674c1436491f..e30c537cce32 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
418 if (ioaddr->ctl_addr) 418 if (ioaddr->ctl_addr)
419 iowrite8(tf->ctl, ioaddr->ctl_addr); 419 iowrite8(tf->ctl, ioaddr->ctl_addr);
420 ap->last_ctl = tf->ctl; 420 ap->last_ctl = tf->ctl;
421 ata_wait_idle(ap);
421 } 422 }
422 423
423 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 424 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
453 iowrite8(tf->device, ioaddr->device_addr); 454 iowrite8(tf->device, ioaddr->device_addr);
454 VPRINTK("device 0x%X\n", tf->device); 455 VPRINTK("device 0x%X\n", tf->device);
455 } 456 }
457
458 ata_wait_idle(ap);
456} 459}
457EXPORT_SYMBOL_GPL(ata_sff_tf_load); 460EXPORT_SYMBOL_GPL(ata_sff_tf_load);
458 461
@@ -1042,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1042int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 1045int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1043 u8 status, int in_wq) 1046 u8 status, int in_wq)
1044{ 1047{
1045 struct ata_eh_info *ehi = &ap->link.eh_info; 1048 struct ata_link *link = qc->dev->link;
1049 struct ata_eh_info *ehi = &link->eh_info;
1046 unsigned long flags = 0; 1050 unsigned long flags = 0;
1047 int poll_next; 1051 int poll_next;
1048 1052
@@ -1298,8 +1302,14 @@ fsm_start:
1298} 1302}
1299EXPORT_SYMBOL_GPL(ata_sff_hsm_move); 1303EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1300 1304
1301void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay) 1305void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1302{ 1306{
1307 struct ata_port *ap = link->ap;
1308
1309 WARN_ON((ap->sff_pio_task_link != NULL) &&
1310 (ap->sff_pio_task_link != link));
1311 ap->sff_pio_task_link = link;
1312
1303 /* may fail if ata_sff_flush_pio_task() in progress */ 1313 /* may fail if ata_sff_flush_pio_task() in progress */
1304 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, 1314 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
1305 msecs_to_jiffies(delay)); 1315 msecs_to_jiffies(delay));
@@ -1321,14 +1331,18 @@ static void ata_sff_pio_task(struct work_struct *work)
1321{ 1331{
1322 struct ata_port *ap = 1332 struct ata_port *ap =
1323 container_of(work, struct ata_port, sff_pio_task.work); 1333 container_of(work, struct ata_port, sff_pio_task.work);
1334 struct ata_link *link = ap->sff_pio_task_link;
1324 struct ata_queued_cmd *qc; 1335 struct ata_queued_cmd *qc;
1325 u8 status; 1336 u8 status;
1326 int poll_next; 1337 int poll_next;
1327 1338
1339 BUG_ON(ap->sff_pio_task_link == NULL);
1328 /* qc can be NULL if timeout occurred */ 1340 /* qc can be NULL if timeout occurred */
1329 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1341 qc = ata_qc_from_tag(ap, link->active_tag);
1330 if (!qc) 1342 if (!qc) {
1343 ap->sff_pio_task_link = NULL;
1331 return; 1344 return;
1345 }
1332 1346
1333fsm_start: 1347fsm_start:
1334 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); 1348 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1345,11 +1359,16 @@ fsm_start:
1345 msleep(2); 1359 msleep(2);
1346 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); 1360 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1347 if (status & ATA_BUSY) { 1361 if (status & ATA_BUSY) {
1348 ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE); 1362 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1349 return; 1363 return;
1350 } 1364 }
1351 } 1365 }
1352 1366
1367 /*
1368 * hsm_move() may trigger another command to be processed.
1369 * clean the link beforehand.
1370 */
1371 ap->sff_pio_task_link = NULL;
1353 /* move the HSM */ 1372 /* move the HSM */
1354 poll_next = ata_sff_hsm_move(ap, qc, status, 1); 1373 poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1355 1374
@@ -1376,6 +1395,7 @@ fsm_start:
1376unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) 1395unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1377{ 1396{
1378 struct ata_port *ap = qc->ap; 1397 struct ata_port *ap = qc->ap;
1398 struct ata_link *link = qc->dev->link;
1379 1399
1380 /* Use polling pio if the LLD doesn't handle 1400 /* Use polling pio if the LLD doesn't handle
1381 * interrupt driven pio and atapi CDB interrupt. 1401 * interrupt driven pio and atapi CDB interrupt.
@@ -1396,7 +1416,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1396 ap->hsm_task_state = HSM_ST_LAST; 1416 ap->hsm_task_state = HSM_ST_LAST;
1397 1417
1398 if (qc->tf.flags & ATA_TFLAG_POLLING) 1418 if (qc->tf.flags & ATA_TFLAG_POLLING)
1399 ata_sff_queue_pio_task(ap, 0); 1419 ata_sff_queue_pio_task(link, 0);
1400 1420
1401 break; 1421 break;
1402 1422
@@ -1409,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1409 if (qc->tf.flags & ATA_TFLAG_WRITE) { 1429 if (qc->tf.flags & ATA_TFLAG_WRITE) {
1410 /* PIO data out protocol */ 1430 /* PIO data out protocol */
1411 ap->hsm_task_state = HSM_ST_FIRST; 1431 ap->hsm_task_state = HSM_ST_FIRST;
1412 ata_sff_queue_pio_task(ap, 0); 1432 ata_sff_queue_pio_task(link, 0);
1413 1433
1414 /* always send first data block using the 1434 /* always send first data block using the
1415 * ata_sff_pio_task() codepath. 1435 * ata_sff_pio_task() codepath.
@@ -1419,7 +1439,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1419 ap->hsm_task_state = HSM_ST; 1439 ap->hsm_task_state = HSM_ST;
1420 1440
1421 if (qc->tf.flags & ATA_TFLAG_POLLING) 1441 if (qc->tf.flags & ATA_TFLAG_POLLING)
1422 ata_sff_queue_pio_task(ap, 0); 1442 ata_sff_queue_pio_task(link, 0);
1423 1443
1424 /* if polling, ata_sff_pio_task() handles the 1444 /* if polling, ata_sff_pio_task() handles the
1425 * rest. otherwise, interrupt handler takes 1445 * rest. otherwise, interrupt handler takes
@@ -1441,7 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1441 /* send cdb by polling if no cdb interrupt */ 1461 /* send cdb by polling if no cdb interrupt */
1442 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || 1462 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1443 (qc->tf.flags & ATA_TFLAG_POLLING)) 1463 (qc->tf.flags & ATA_TFLAG_POLLING))
1444 ata_sff_queue_pio_task(ap, 0); 1464 ata_sff_queue_pio_task(link, 0);
1445 break; 1465 break;
1446 1466
1447 default: 1467 default:
@@ -2734,10 +2754,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2734unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) 2754unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2735{ 2755{
2736 struct ata_port *ap = qc->ap; 2756 struct ata_port *ap = qc->ap;
2737 2757 struct ata_link *link = qc->dev->link;
2738 /* see ata_dma_blacklisted() */
2739 BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) &&
2740 qc->tf.protocol == ATAPI_PROT_DMA);
2741 2758
2742 /* defer PIO handling to sff_qc_issue */ 2759 /* defer PIO handling to sff_qc_issue */
2743 if (!ata_is_dma(qc->tf.protocol)) 2760 if (!ata_is_dma(qc->tf.protocol))
@@ -2766,7 +2783,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2766 2783
2767 /* send cdb by polling if no cdb interrupt */ 2784 /* send cdb by polling if no cdb interrupt */
2768 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 2785 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2769 ata_sff_queue_pio_task(ap, 0); 2786 ata_sff_queue_pio_task(link, 0);
2770 break; 2787 break;
2771 2788
2772 default: 2789 default:
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index ba43f0f8c880..2215632e4b31 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
74 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 74 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
75 75
76 /* Odd numbered device ids are the units with enable bits (the -R cards) */ 76 /* Odd numbered device ids are the units with enable bits (the -R cards) */
77 if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) 77 if ((pdev->device & 1) &&
78 !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
78 return -ENOENT; 79 return -ENOENT;
79 80
80 return ata_sff_prereset(link, deadline); 81 return ata_sff_prereset(link, deadline);
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 9f5da1c7454b..905ff76d3cbb 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -121,14 +121,8 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
121 121
122 if (pair) { 122 if (pair) {
123 struct ata_timing tp; 123 struct ata_timing tp;
124
125 ata_timing_compute(pair, pair->pio_mode, &tp, T, 0); 124 ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
126 ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP); 125 ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
127 if (pair->dma_mode) {
128 ata_timing_compute(pair, pair->dma_mode,
129 &tp, T, 0);
130 ata_timing_merge(&tp, &t, &t, ATA_TIMING_SETUP);
131 }
132 } 126 }
133 } 127 }
134 128
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 9df1ff7e1eaa..eaf194138f21 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -44,6 +44,9 @@
44 * Specific support is included for the ht6560a/ht6560b/opti82c611a/ 44 * Specific support is included for the ht6560a/ht6560b/opti82c611a/
45 * opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A 45 * opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A
46 * 46 *
47 * Support for the Winbond 83759A when operating in advanced mode.
48 * Multichip mode is not currently supported.
49 *
47 * Use the autospeed and pio_mask options with: 50 * Use the autospeed and pio_mask options with:
48 * Appian ADI/2 aka CLPD7220 or AIC25VL01. 51 * Appian ADI/2 aka CLPD7220 or AIC25VL01.
49 * Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with 52 * Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
@@ -135,12 +138,18 @@ static int ht6560b; /* HT 6560A on primary 1, second 2, both 3 */
135static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */ 138static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */
136static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */ 139static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */
137static int qdi; /* Set to probe QDI controllers */ 140static int qdi; /* Set to probe QDI controllers */
138static int winbond; /* Set to probe Winbond controllers,
139 give I/O port if non standard */
140static int autospeed; /* Chip present which snoops speed changes */ 141static int autospeed; /* Chip present which snoops speed changes */
141static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */ 142static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
142static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ 143static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
143 144
145#ifdef PATA_WINBOND_VLB_MODULE
146static int winbond = 1; /* Set to probe Winbond controllers,
147 give I/O port if non standard */
148#else
149static int winbond; /* Set to probe Winbond controllers,
150 give I/O port if non standard */
151#endif
152
144/** 153/**
145 * legacy_probe_add - Add interface to probe list 154 * legacy_probe_add - Add interface to probe list
146 * @port: Controller port 155 * @port: Controller port
@@ -1297,6 +1306,7 @@ MODULE_AUTHOR("Alan Cox");
1297MODULE_DESCRIPTION("low-level driver for legacy ATA"); 1306MODULE_DESCRIPTION("low-level driver for legacy ATA");
1298MODULE_LICENSE("GPL"); 1307MODULE_LICENSE("GPL");
1299MODULE_VERSION(DRV_VERSION); 1308MODULE_VERSION(DRV_VERSION);
1309MODULE_ALIAS("pata_winbond");
1300 1310
1301module_param(probe_all, int, 0); 1311module_param(probe_all, int, 0);
1302module_param(autospeed, int, 0); 1312module_param(autospeed, int, 0);
@@ -1305,6 +1315,7 @@ module_param(ht6560b, int, 0);
1305module_param(opti82c611a, int, 0); 1315module_param(opti82c611a, int, 0);
1306module_param(opti82c46x, int, 0); 1316module_param(opti82c46x, int, 0);
1307module_param(qdi, int, 0); 1317module_param(qdi, int, 0);
1318module_param(winbond, int, 0);
1308module_param(pio_mask, int, 0); 1319module_param(pio_mask, int, 0);
1309module_param(iordy_mask, int, 0); 1320module_param(iordy_mask, int, 0);
1310 1321
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 5e659885de16..ac8d7d97e408 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
417 tf->lbam, 417 tf->lbam,
418 tf->lbah); 418 tf->lbah);
419 } 419 }
420
421 ata_wait_idle(ap);
420} 422}
421 423
422static int via_port_start(struct ata_port *ap) 424static int via_port_start(struct ata_port *ap)
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
deleted file mode 100644
index 6d8619b6f670..000000000000
--- a/drivers/ata/pata_winbond.c
+++ /dev/null
@@ -1,282 +0,0 @@
1/*
2 * pata_winbond.c - Winbond VLB ATA controllers
3 * (C) 2006 Red Hat
4 *
5 * Support for the Winbond 83759A when operating in advanced mode.
6 * Multichip mode is not currently supported.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/blkdev.h>
13#include <linux/delay.h>
14#include <scsi/scsi_host.h>
15#include <linux/libata.h>
16#include <linux/platform_device.h>
17
18#define DRV_NAME "pata_winbond"
19#define DRV_VERSION "0.0.3"
20
21#define NR_HOST 4 /* Two winbond controllers, two channels each */
22
23struct winbond_data {
24 unsigned long config;
25 struct platform_device *platform_dev;
26};
27
28static struct ata_host *winbond_host[NR_HOST];
29static struct winbond_data winbond_data[NR_HOST];
30static int nr_winbond_host;
31
32#ifdef MODULE
33static int probe_winbond = 1;
34#else
35static int probe_winbond;
36#endif
37
38static DEFINE_SPINLOCK(winbond_lock);
39
40static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
41{
42 unsigned long flags;
43 spin_lock_irqsave(&winbond_lock, flags);
44 outb(reg, port + 0x01);
45 outb(val, port + 0x02);
46 spin_unlock_irqrestore(&winbond_lock, flags);
47}
48
49static u8 winbond_readcfg(unsigned long port, u8 reg)
50{
51 u8 val;
52
53 unsigned long flags;
54 spin_lock_irqsave(&winbond_lock, flags);
55 outb(reg, port + 0x01);
56 val = inb(port + 0x02);
57 spin_unlock_irqrestore(&winbond_lock, flags);
58
59 return val;
60}
61
62static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
63{
64 struct ata_timing t;
65 struct winbond_data *winbond = ap->host->private_data;
66 int active, recovery;
67 u8 reg;
68 int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
69
70 reg = winbond_readcfg(winbond->config, 0x81);
71
72 /* Get the timing data in cycles */
73 if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
74 ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
75 else
76 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
77
78 active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
79 recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
80 timing = (active << 4) | recovery;
81 winbond_writecfg(winbond->config, timing, reg);
82
83 /* Load the setup timing */
84
85 reg = 0x35;
86 if (adev->class != ATA_DEV_ATA)
87 reg |= 0x08; /* FIFO off */
88 if (!ata_pio_need_iordy(adev))
89 reg |= 0x02; /* IORDY off */
90 reg |= (clamp_val(t.setup, 0, 3) << 6);
91 winbond_writecfg(winbond->config, timing + 1, reg);
92}
93
94
95static unsigned int winbond_data_xfer(struct ata_device *dev,
96 unsigned char *buf, unsigned int buflen, int rw)
97{
98 struct ata_port *ap = dev->link->ap;
99 int slop = buflen & 3;
100
101 if (ata_id_has_dword_io(dev->id)) {
102 if (rw == READ)
103 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
104 else
105 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
106
107 if (unlikely(slop)) {
108 __le32 pad;
109 if (rw == READ) {
110 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
111 memcpy(buf + buflen - slop, &pad, slop);
112 } else {
113 memcpy(&pad, buf + buflen - slop, slop);
114 iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
115 }
116 buflen += 4 - slop;
117 }
118 } else
119 buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
120
121 return buflen;
122}
123
124static struct scsi_host_template winbond_sht = {
125 ATA_PIO_SHT(DRV_NAME),
126};
127
128static struct ata_port_operations winbond_port_ops = {
129 .inherits = &ata_sff_port_ops,
130 .sff_data_xfer = winbond_data_xfer,
131 .cable_detect = ata_cable_40wire,
132 .set_piomode = winbond_set_piomode,
133};
134
135/**
136 * winbond_init_one - attach a winbond interface
137 * @type: Type to display
138 * @io: I/O port start
139 * @irq: interrupt line
140 * @fast: True if on a > 33Mhz VLB
141 *
142 * Register a VLB bus IDE interface. Such interfaces are PIO and we
143 * assume do not support IRQ sharing.
144 */
145
146static __init int winbond_init_one(unsigned long port)
147{
148 struct platform_device *pdev;
149 u8 reg;
150 int i, rc;
151
152 reg = winbond_readcfg(port, 0x81);
153 reg |= 0x80; /* jumpered mode off */
154 winbond_writecfg(port, 0x81, reg);
155 reg = winbond_readcfg(port, 0x83);
156 reg |= 0xF0; /* local control */
157 winbond_writecfg(port, 0x83, reg);
158 reg = winbond_readcfg(port, 0x85);
159 reg |= 0xF0; /* programmable timing */
160 winbond_writecfg(port, 0x85, reg);
161
162 reg = winbond_readcfg(port, 0x81);
163
164 if (!(reg & 0x03)) /* Disabled */
165 return -ENODEV;
166
167 for (i = 0; i < 2 ; i ++) {
168 unsigned long cmd_port = 0x1F0 - (0x80 * i);
169 unsigned long ctl_port = cmd_port + 0x206;
170 struct ata_host *host;
171 struct ata_port *ap;
172 void __iomem *cmd_addr, *ctl_addr;
173
174 if (!(reg & (1 << i)))
175 continue;
176
177 pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
178 if (IS_ERR(pdev))
179 return PTR_ERR(pdev);
180
181 rc = -ENOMEM;
182 host = ata_host_alloc(&pdev->dev, 1);
183 if (!host)
184 goto err_unregister;
185 ap = host->ports[0];
186
187 rc = -ENOMEM;
188 cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
189 ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1);
190 if (!cmd_addr || !ctl_addr)
191 goto err_unregister;
192
193 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
194
195 ap->ops = &winbond_port_ops;
196 ap->pio_mask = ATA_PIO4;
197 ap->flags |= ATA_FLAG_SLAVE_POSS;
198 ap->ioaddr.cmd_addr = cmd_addr;
199 ap->ioaddr.altstatus_addr = ctl_addr;
200 ap->ioaddr.ctl_addr = ctl_addr;
201 ata_sff_std_ports(&ap->ioaddr);
202
203 /* hook in a private data structure per channel */
204 host->private_data = &winbond_data[nr_winbond_host];
205 winbond_data[nr_winbond_host].config = port;
206 winbond_data[nr_winbond_host].platform_dev = pdev;
207
208 /* activate */
209 rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0,
210 &winbond_sht);
211 if (rc)
212 goto err_unregister;
213
214 winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
215 }
216
217 return 0;
218
219 err_unregister:
220 platform_device_unregister(pdev);
221 return rc;
222}
223
224/**
225 * winbond_init - attach winbond interfaces
226 *
227 * Attach winbond IDE interfaces by scanning the ports it may occupy.
228 */
229
230static __init int winbond_init(void)
231{
232 static const unsigned long config[2] = { 0x130, 0x1B0 };
233
234 int ct = 0;
235 int i;
236
237 if (probe_winbond == 0)
238 return -ENODEV;
239
240 /*
241 * Check both base addresses
242 */
243
244 for (i = 0; i < 2; i++) {
245 if (probe_winbond & (1<<i)) {
246 int ret = 0;
247 unsigned long port = config[i];
248
249 if (request_region(port, 2, "pata_winbond")) {
250 ret = winbond_init_one(port);
251 if (ret <= 0)
252 release_region(port, 2);
253 else ct+= ret;
254 }
255 }
256 }
257 if (ct != 0)
258 return 0;
259 return -ENODEV;
260}
261
262static __exit void winbond_exit(void)
263{
264 int i;
265
266 for (i = 0; i < nr_winbond_host; i++) {
267 ata_host_detach(winbond_host[i]);
268 release_region(winbond_data[i].config, 2);
269 platform_device_unregister(winbond_data[i].platform_dev);
270 }
271}
272
273MODULE_AUTHOR("Alan Cox");
274MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
275MODULE_LICENSE("GPL");
276MODULE_VERSION(DRV_VERSION);
277
278module_init(winbond_init);
279module_exit(winbond_exit);
280
281module_param(probe_winbond, int, 0);
282
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 2673a3d14806..6cf57c5c2b5f 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1459,7 +1459,7 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
1459{ 1459{
1460 struct scatterlist *sg = qc->sg; 1460 struct scatterlist *sg = qc->sg;
1461 struct ata_port *ap = qc->ap; 1461 struct ata_port *ap = qc->ap;
1462 u32 dma_chan; 1462 int dma_chan;
1463 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 1463 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1464 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1464 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1465 int err; 1465 int err;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9463c71dd38e..a9fd9709c262 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1898,19 +1898,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
1898 * LOCKING: 1898 * LOCKING:
1899 * Inherited from caller. 1899 * Inherited from caller.
1900 */ 1900 */
1901static void mv_bmdma_stop(struct ata_queued_cmd *qc) 1901static void mv_bmdma_stop_ap(struct ata_port *ap)
1902{ 1902{
1903 struct ata_port *ap = qc->ap;
1904 void __iomem *port_mmio = mv_ap_base(ap); 1903 void __iomem *port_mmio = mv_ap_base(ap);
1905 u32 cmd; 1904 u32 cmd;
1906 1905
1907 /* clear start/stop bit */ 1906 /* clear start/stop bit */
1908 cmd = readl(port_mmio + BMDMA_CMD); 1907 cmd = readl(port_mmio + BMDMA_CMD);
1909 cmd &= ~ATA_DMA_START; 1908 if (cmd & ATA_DMA_START) {
1910 writelfl(cmd, port_mmio + BMDMA_CMD); 1909 cmd &= ~ATA_DMA_START;
1910 writelfl(cmd, port_mmio + BMDMA_CMD);
1911
1912 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1913 ata_sff_dma_pause(ap);
1914 }
1915}
1911 1916
1912 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ 1917static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1913 ata_sff_dma_pause(ap); 1918{
1919 mv_bmdma_stop_ap(qc->ap);
1914} 1920}
1915 1921
1916/** 1922/**
@@ -1934,8 +1940,21 @@ static u8 mv_bmdma_status(struct ata_port *ap)
1934 reg = readl(port_mmio + BMDMA_STATUS); 1940 reg = readl(port_mmio + BMDMA_STATUS);
1935 if (reg & ATA_DMA_ACTIVE) 1941 if (reg & ATA_DMA_ACTIVE)
1936 status = ATA_DMA_ACTIVE; 1942 status = ATA_DMA_ACTIVE;
1937 else 1943 else if (reg & ATA_DMA_ERR)
1938 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; 1944 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1945 else {
1946 /*
1947 * Just because DMA_ACTIVE is 0 (DMA completed),
1948 * this does _not_ mean the device is "done".
1949 * So we should not yet be signalling ATA_DMA_INTR
1950 * in some cases. Eg. DSM/TRIM, and perhaps others.
1951 */
1952 mv_bmdma_stop_ap(ap);
1953 if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1954 status = 0;
1955 else
1956 status = ATA_DMA_INTR;
1957 }
1939 return status; 1958 return status;
1940} 1959}
1941 1960
@@ -1995,6 +2014,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1995 2014
1996 switch (tf->protocol) { 2015 switch (tf->protocol) {
1997 case ATA_PROT_DMA: 2016 case ATA_PROT_DMA:
2017 if (tf->command == ATA_CMD_DSM)
2018 return;
2019 /* fall-thru */
1998 case ATA_PROT_NCQ: 2020 case ATA_PROT_NCQ:
1999 break; /* continue below */ 2021 break; /* continue below */
2000 case ATA_PROT_PIO: 2022 case ATA_PROT_PIO:
@@ -2094,6 +2116,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
2094 if ((tf->protocol != ATA_PROT_DMA) && 2116 if ((tf->protocol != ATA_PROT_DMA) &&
2095 (tf->protocol != ATA_PROT_NCQ)) 2117 (tf->protocol != ATA_PROT_NCQ))
2096 return; 2118 return;
2119 if (tf->command == ATA_CMD_DSM)
2120 return; /* use bmdma for this */
2097 2121
2098 /* Fill in Gen IIE command request block */ 2122 /* Fill in Gen IIE command request block */
2099 if (!(tf->flags & ATA_TFLAG_WRITE)) 2123 if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2260,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2260 } 2284 }
2261 2285
2262 if (qc->tf.flags & ATA_TFLAG_POLLING) 2286 if (qc->tf.flags & ATA_TFLAG_POLLING)
2263 ata_sff_queue_pio_task(ap, 0); 2287 ata_sff_queue_pio_task(link, 0);
2264 return 0; 2288 return 0;
2265} 2289}
2266 2290
@@ -2289,6 +2313,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2289 2313
2290 switch (qc->tf.protocol) { 2314 switch (qc->tf.protocol) {
2291 case ATA_PROT_DMA: 2315 case ATA_PROT_DMA:
2316 if (qc->tf.command == ATA_CMD_DSM) {
2317 if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
2318 return AC_ERR_OTHER;
2319 break; /* use bmdma for this */
2320 }
2321 /* fall thru */
2292 case ATA_PROT_NCQ: 2322 case ATA_PROT_NCQ:
2293 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); 2323 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2294 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; 2324 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index ee9ddeb53417..8cb0347dec28 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -3156,7 +3156,6 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
3156{ 3156{
3157 struct atm_dev *dev; 3157 struct atm_dev *dev;
3158 IADEV *iadev; 3158 IADEV *iadev;
3159 unsigned long flags;
3160 int ret; 3159 int ret;
3161 3160
3162 iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); 3161 iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
@@ -3188,19 +3187,14 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
3188 ia_dev[iadev_count] = iadev; 3187 ia_dev[iadev_count] = iadev;
3189 _ia_dev[iadev_count] = dev; 3188 _ia_dev[iadev_count] = dev;
3190 iadev_count++; 3189 iadev_count++;
3191 spin_lock_init(&iadev->misc_lock);
3192 /* First fixes first. I don't want to think about this now. */
3193 spin_lock_irqsave(&iadev->misc_lock, flags);
3194 if (ia_init(dev) || ia_start(dev)) { 3190 if (ia_init(dev) || ia_start(dev)) {
3195 IF_INIT(printk("IA register failed!\n");) 3191 IF_INIT(printk("IA register failed!\n");)
3196 iadev_count--; 3192 iadev_count--;
3197 ia_dev[iadev_count] = NULL; 3193 ia_dev[iadev_count] = NULL;
3198 _ia_dev[iadev_count] = NULL; 3194 _ia_dev[iadev_count] = NULL;
3199 spin_unlock_irqrestore(&iadev->misc_lock, flags);
3200 ret = -EINVAL; 3195 ret = -EINVAL;
3201 goto err_out_deregister_dev; 3196 goto err_out_deregister_dev;
3202 } 3197 }
3203 spin_unlock_irqrestore(&iadev->misc_lock, flags);
3204 IF_EVENT(printk("iadev_count = %d\n", iadev_count);) 3198 IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3205 3199
3206 iadev->next_board = ia_boards; 3200 iadev->next_board = ia_boards;
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index b2cd20f549cb..077735e0e04b 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -1022,7 +1022,7 @@ typedef struct iadev_t {
1022 struct dle_q rx_dle_q; 1022 struct dle_q rx_dle_q;
1023 struct free_desc_q *rx_free_desc_qhead; 1023 struct free_desc_q *rx_free_desc_qhead;
1024 struct sk_buff_head rx_dma_q; 1024 struct sk_buff_head rx_dma_q;
1025 spinlock_t rx_lock, misc_lock; 1025 spinlock_t rx_lock;
1026 struct atm_vcc **rx_open; /* list of all open VCs */ 1026 struct atm_vcc **rx_open; /* list of all open VCs */
1027 u16 num_rx_desc, rx_buf_sz, rxing; 1027 u16 num_rx_desc, rx_buf_sz, rxing;
1028 u32 rx_pkt_ram, rx_tmp_cnt; 1028 u32 rx_pkt_ram, rx_tmp_cnt;
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index f916ddf63938..f46138ab38b6 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -444,6 +444,7 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
444 struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); 444 struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
445 struct solos_card *card = atmdev->dev_data; 445 struct solos_card *card = atmdev->dev_data;
446 struct sk_buff *skb; 446 struct sk_buff *skb;
447 unsigned int len;
447 448
448 spin_lock(&card->cli_queue_lock); 449 spin_lock(&card->cli_queue_lock);
449 skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]); 450 skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
@@ -451,11 +452,12 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
451 if(skb == NULL) 452 if(skb == NULL)
452 return sprintf(buf, "No data.\n"); 453 return sprintf(buf, "No data.\n");
453 454
454 memcpy(buf, skb->data, skb->len); 455 len = skb->len;
455 dev_dbg(&card->dev->dev, "len: %d\n", skb->len); 456 memcpy(buf, skb->data, len);
457 dev_dbg(&card->dev->dev, "len: %d\n", len);
456 458
457 kfree_skb(skb); 459 kfree_skb(skb);
458 return skb->len; 460 return len;
459} 461}
460 462
461static int send_command(struct solos_card *card, int dev, const char *buf, size_t size) 463static int send_command(struct solos_card *card, int dev, const char *buf, size_t size)
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index c8a44f5e0584..40af43ebd92d 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -568,7 +568,7 @@ static int _request_firmware(const struct firmware **firmware_p,
568out: 568out:
569 if (retval) { 569 if (retval) {
570 release_firmware(firmware); 570 release_firmware(firmware);
571 firmware_p = NULL; 571 *firmware_p = NULL;
572 } 572 }
573 573
574 return retval; 574 return retval;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5419a49ff135..276d5a701dc3 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -59,6 +59,7 @@ void device_pm_init(struct device *dev)
59{ 59{
60 dev->power.status = DPM_ON; 60 dev->power.status = DPM_ON;
61 init_completion(&dev->power.completion); 61 init_completion(&dev->power.completion);
62 complete_all(&dev->power.completion);
62 dev->power.wakeup_count = 0; 63 dev->power.wakeup_count = 0;
63 pm_runtime_init(dev); 64 pm_runtime_init(dev);
64} 65}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 31064df1370a..5e4fadcdece9 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -297,6 +297,8 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h,
297 spin_lock_irqsave(&h->lock, flags); 297 spin_lock_irqsave(&h->lock, flags);
298 addQ(&h->reqQ, c); 298 addQ(&h->reqQ, c);
299 h->Qdepth++; 299 h->Qdepth++;
300 if (h->Qdepth > h->maxQsinceinit)
301 h->maxQsinceinit = h->Qdepth;
300 start_io(h); 302 start_io(h);
301 spin_unlock_irqrestore(&h->lock, flags); 303 spin_unlock_irqrestore(&h->lock, flags);
302} 304}
@@ -4519,6 +4521,12 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4519 misc_fw_support = readl(&cfgtable->misc_fw_support); 4521 misc_fw_support = readl(&cfgtable->misc_fw_support);
4520 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 4522 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
4521 4523
4524 /* The doorbell reset seems to cause lockups on some Smart
4525 * Arrays (e.g. P410, P410i, maybe others). Until this is
4526 * fixed or at least isolated, avoid the doorbell reset.
4527 */
4528 use_doorbell = 0;
4529
4522 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); 4530 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
4523 if (rc) 4531 if (rc)
4524 goto unmap_cfgtable; 4532 goto unmap_cfgtable;
@@ -4712,6 +4720,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4712 h->scatter_list = kmalloc(h->max_commands * 4720 h->scatter_list = kmalloc(h->max_commands *
4713 sizeof(struct scatterlist *), 4721 sizeof(struct scatterlist *),
4714 GFP_KERNEL); 4722 GFP_KERNEL);
4723 if (!h->scatter_list)
4724 goto clean4;
4725
4715 for (k = 0; k < h->nr_cmds; k++) { 4726 for (k = 0; k < h->nr_cmds; k++) {
4716 h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * 4727 h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
4717 h->maxsgentries, 4728 h->maxsgentries,
@@ -4781,7 +4792,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4781clean4: 4792clean4:
4782 kfree(h->cmd_pool_bits); 4793 kfree(h->cmd_pool_bits);
4783 /* Free up sg elements */ 4794 /* Free up sg elements */
4784 for (k = 0; k < h->nr_cmds; k++) 4795 for (k-- ; k >= 0; k--)
4785 kfree(h->scatter_list[k]); 4796 kfree(h->scatter_list[k]);
4786 kfree(h->scatter_list); 4797 kfree(h->scatter_list);
4787 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); 4798 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index b82c5ce5e9df..76fa3deaee84 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev)
974 host->breq->queuedata = host; 974 host->breq->queuedata = host;
975 975
976 /* mflash is random device, thanx for the noop */ 976 /* mflash is random device, thanx for the noop */
977 elevator_exit(host->breq->elevator); 977 err = elevator_change(host->breq, "noop");
978 err = elevator_init(host->breq, "noop");
979 if (err) { 978 if (err) {
980 printk(KERN_ERR "%s:%d (elevator_init) fail\n", 979 printk(KERN_ERR "%s:%d (elevator_init) fail\n",
981 __func__, __LINE__); 980 __func__, __LINE__);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 0166ea136045..1b5cfcccd654 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2368,7 +2368,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2368 pkt_shrink_pktlist(pd); 2368 pkt_shrink_pktlist(pd);
2369} 2369}
2370 2370
2371static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor) 2371static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
2372{ 2372{
2373 if (dev_minor >= MAX_WRITERS) 2373 if (dev_minor >= MAX_WRITERS)
2374 return NULL; 2374 return NULL;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 126062802cb7..831e75caea3d 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -199,6 +199,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
199 struct virtio_blk *vblk = disk->private_data; 199 struct virtio_blk *vblk = disk->private_data;
200 struct request *req; 200 struct request *req;
201 struct bio *bio; 201 struct bio *bio;
202 int err;
202 203
203 bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, 204 bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
204 GFP_KERNEL); 205 GFP_KERNEL);
@@ -212,7 +213,10 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
212 } 213 }
213 214
214 req->cmd_type = REQ_TYPE_SPECIAL; 215 req->cmd_type = REQ_TYPE_SPECIAL;
215 return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); 216 err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
217 blk_put_request(req);
218
219 return err;
216} 220}
217 221
218static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode, 222static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 0b1eea643262..f2ffc46644df 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -820,7 +820,7 @@ static int blkfront_probe(struct xenbus_device *dev,
820 char *type; 820 char *type;
821 int len; 821 int len;
822 /* no unplug has been done: do not hook devices != xen vbds */ 822 /* no unplug has been done: do not hook devices != xen vbds */
823 if (xen_platform_pci_unplug & XEN_UNPLUG_IGNORE) { 823 if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
824 int major; 824 int major;
825 825
826 if (!VDEV_IS_EXTENDED(vdevice)) 826 if (!VDEV_IS_EXTENDED(vdevice))
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 710af89b176d..cd18493c9527 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,6 +12,7 @@
12#include <asm/smp.h> 12#include <asm/smp.h>
13#include "agp.h" 13#include "agp.h"
14#include "intel-agp.h" 14#include "intel-agp.h"
15#include <linux/intel-gtt.h>
15 16
16#include "intel-gtt.c" 17#include "intel-gtt.c"
17 18
@@ -805,6 +806,8 @@ static const struct intel_driver_description {
805 "G45/G43", NULL, &intel_i965_driver }, 806 "G45/G43", NULL, &intel_i965_driver },
806 { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 807 { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
807 "B43", NULL, &intel_i965_driver }, 808 "B43", NULL, &intel_i965_driver },
809 { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
810 "B43", NULL, &intel_i965_driver },
808 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 811 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
809 "G41", NULL, &intel_i965_driver }, 812 "G41", NULL, &intel_i965_driver },
810 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 813 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
@@ -815,11 +818,19 @@ static const struct intel_driver_description {
815 "HD Graphics", NULL, &intel_i965_driver }, 818 "HD Graphics", NULL, &intel_i965_driver },
816 { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 819 { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
817 "HD Graphics", NULL, &intel_i965_driver }, 820 "HD Graphics", NULL, &intel_i965_driver },
818 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 821 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
822 "Sandybridge", NULL, &intel_gen6_driver },
823 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
824 "Sandybridge", NULL, &intel_gen6_driver },
825 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
826 "Sandybridge", NULL, &intel_gen6_driver },
827 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
828 "Sandybridge", NULL, &intel_gen6_driver },
829 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
819 "Sandybridge", NULL, &intel_gen6_driver }, 830 "Sandybridge", NULL, &intel_gen6_driver },
820 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 831 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
821 "Sandybridge", NULL, &intel_gen6_driver }, 832 "Sandybridge", NULL, &intel_gen6_driver },
822 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG, 833 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
823 "Sandybridge", NULL, &intel_gen6_driver }, 834 "Sandybridge", NULL, &intel_gen6_driver },
824 { 0, 0, NULL, NULL, NULL } 835 { 0, 0, NULL, NULL, NULL }
825}; 836};
@@ -1044,6 +1055,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
1044 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), 1055 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
1045 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB), 1056 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
1046 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB), 1057 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
1058 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
1047 { } 1059 { }
1048}; 1060};
1049 1061
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 08d47532e605..d09b1ab7e8ab 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -1,6 +1,8 @@
1/* 1/*
2 * Common Intel AGPGART and GTT definitions. 2 * Common Intel AGPGART and GTT definitions.
3 */ 3 */
4#ifndef _INTEL_AGP_H
5#define _INTEL_AGP_H
4 6
5/* Intel registers */ 7/* Intel registers */
6#define INTEL_APSIZE 0xb4 8#define INTEL_APSIZE 0xb4
@@ -184,6 +186,8 @@
184#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 186#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
185#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 187#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
186#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 188#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
189#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90
190#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92
187#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 191#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
188#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 192#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
189#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 193#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
@@ -200,11 +204,16 @@
200#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 204#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
201#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a 205#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
202#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 206#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
203#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 207#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
204#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 208#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
205#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 209#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
206#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 210#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
207#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG 0x0126 211#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
212#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
213#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
214#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
215#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
216#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
208 217
209/* cover 915 and 945 variants */ 218/* cover 915 and 945 variants */
210#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ 219#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -231,7 +240,8 @@
231 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) 240 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
232 241
233#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ 242#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
234 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) 243 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
244 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
235 245
236#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ 246#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
237 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ 247 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -244,3 +254,5 @@
244 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ 254 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
245 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ 255 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
246 IS_SNB) 256 IS_SNB)
257
258#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index d22ffb811bf2..75e0a3497888 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -49,6 +49,26 @@ static struct gatt_mask intel_i810_masks[] =
49 .type = INTEL_AGP_CACHED_MEMORY} 49 .type = INTEL_AGP_CACHED_MEMORY}
50}; 50};
51 51
52#define INTEL_AGP_UNCACHED_MEMORY 0
53#define INTEL_AGP_CACHED_MEMORY_LLC 1
54#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
55#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
56#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
57
58static struct gatt_mask intel_gen6_masks[] =
59{
60 {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
61 .type = INTEL_AGP_UNCACHED_MEMORY },
62 {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
63 .type = INTEL_AGP_CACHED_MEMORY_LLC },
64 {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
65 .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
66 {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
67 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
68 {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
69 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
70};
71
52static struct _intel_private { 72static struct _intel_private {
53 struct pci_dev *pcidev; /* device one */ 73 struct pci_dev *pcidev; /* device one */
54 u8 __iomem *registers; 74 u8 __iomem *registers;
@@ -178,13 +198,6 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
178 off_t pg_start, int mask_type) 198 off_t pg_start, int mask_type)
179{ 199{
180 int i, j; 200 int i, j;
181 u32 cache_bits = 0;
182
183 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
184 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
185 {
186 cache_bits = GEN6_PTE_LLC_MLC;
187 }
188 201
189 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 202 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
190 writel(agp_bridge->driver->mask_memory(agp_bridge, 203 writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -317,6 +330,23 @@ static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
317 return 0; 330 return 0;
318} 331}
319 332
333static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
334 int type)
335{
336 unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
337 unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
338
339 if (type_mask == AGP_USER_UNCACHED_MEMORY)
340 return INTEL_AGP_UNCACHED_MEMORY;
341 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
342 return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
343 INTEL_AGP_CACHED_MEMORY_LLC_MLC;
344 else /* set 'normal'/'cached' to LLC by default */
345 return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
346 INTEL_AGP_CACHED_MEMORY_LLC;
347}
348
349
320static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, 350static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
321 int type) 351 int type)
322{ 352{
@@ -588,8 +618,7 @@ static void intel_i830_init_gtt_entries(void)
588 gtt_entries = 0; 618 gtt_entries = 0;
589 break; 619 break;
590 } 620 }
591 } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || 621 } else if (IS_SNB) {
592 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
593 /* 622 /*
594 * SandyBridge has new memory control reg at 0x50.w 623 * SandyBridge has new memory control reg at 0x50.w
595 */ 624 */
@@ -1068,11 +1097,11 @@ static void intel_i9xx_setup_flush(void)
1068 intel_i915_setup_chipset_flush(); 1097 intel_i915_setup_chipset_flush();
1069 } 1098 }
1070 1099
1071 if (intel_private.ifp_resource.start) { 1100 if (intel_private.ifp_resource.start)
1072 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); 1101 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1073 if (!intel_private.i9xx_flush_page) 1102 if (!intel_private.i9xx_flush_page)
1074 dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); 1103 dev_err(&intel_private.pcidev->dev,
1075 } 1104 "can't ioremap flush page - no chipset flushing\n");
1076} 1105}
1077 1106
1078static int intel_i9xx_configure(void) 1107static int intel_i9xx_configure(void)
@@ -1163,7 +1192,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1163 1192
1164 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); 1193 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1165 1194
1166 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && 1195 if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1167 mask_type != INTEL_AGP_CACHED_MEMORY) 1196 mask_type != INTEL_AGP_CACHED_MEMORY)
1168 goto out_err; 1197 goto out_err;
1169 1198
@@ -1333,8 +1362,8 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1333static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge, 1362static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
1334 dma_addr_t addr, int type) 1363 dma_addr_t addr, int type)
1335{ 1364{
1336 /* Shift high bits down */ 1365 /* gen6 has bit11-4 for physical addr bit39-32 */
1337 addr |= (addr >> 28) & 0xff; 1366 addr |= (addr >> 28) & 0xff0;
1338 1367
1339 /* Type checking must be done elsewhere */ 1368 /* Type checking must be done elsewhere */
1340 return addr | bridge->driver->masks[type].mask; 1369 return addr | bridge->driver->masks[type].mask;
@@ -1359,6 +1388,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1359 break; 1388 break;
1360 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: 1389 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1361 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: 1390 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1391 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
1362 *gtt_offset = MB(2); 1392 *gtt_offset = MB(2);
1363 1393
1364 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); 1394 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -1563,7 +1593,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
1563 .fetch_size = intel_i9xx_fetch_size, 1593 .fetch_size = intel_i9xx_fetch_size,
1564 .cleanup = intel_i915_cleanup, 1594 .cleanup = intel_i915_cleanup,
1565 .mask_memory = intel_gen6_mask_memory, 1595 .mask_memory = intel_gen6_mask_memory,
1566 .masks = intel_i810_masks, 1596 .masks = intel_gen6_masks,
1567 .agp_enable = intel_i810_agp_enable, 1597 .agp_enable = intel_i810_agp_enable,
1568 .cache_flush = global_cache_flush, 1598 .cache_flush = global_cache_flush,
1569 .create_gatt_table = intel_i965_create_gatt_table, 1599 .create_gatt_table = intel_i965_create_gatt_table,
@@ -1576,7 +1606,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
1576 .agp_alloc_pages = agp_generic_alloc_pages, 1606 .agp_alloc_pages = agp_generic_alloc_pages,
1577 .agp_destroy_page = agp_generic_destroy_page, 1607 .agp_destroy_page = agp_generic_destroy_page,
1578 .agp_destroy_pages = agp_generic_destroy_pages, 1608 .agp_destroy_pages = agp_generic_destroy_pages,
1579 .agp_type_to_mask_type = intel_i830_type_to_mask_type, 1609 .agp_type_to_mask_type = intel_gen6_type_to_mask_type,
1580 .chipset_flush = intel_i915_chipset_flush, 1610 .chipset_flush = intel_i915_chipset_flush,
1581#ifdef USE_PCI_DMA_API 1611#ifdef USE_PCI_DMA_API
1582 .agp_map_page = intel_agp_map_page, 1612 .agp_map_page = intel_agp_map_page,
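
Aside: the corrected intel_gen6_mask_memory() above packs physical address bits 39:32 into PTE bits 11:4 via (addr >> 28) & 0xff0. A standalone sketch of that bit packing, with a made-up 40-bit page-aligned address and a placeholder valid bit (not the real I810_PTE_VALID value):

#include <stdio.h>
#include <stdint.h>

#define PTE_VALID 0x1	/* stand-in for I810_PTE_VALID */

static uint64_t gen6_pack(uint64_t addr)
{
	/* bits 39:32 of addr end up in bits 11:4 of the PTE */
	addr |= (addr >> 28) & 0xff0;
	return addr | PTE_VALID;
}

int main(void)
{
	uint64_t addr = 0x8300000000ULL | 0x12345000ULL;	/* example 40-bit address */

	printf("addr 0x%010llx -> pte 0x%010llx\n",
	       (unsigned long long)addr,
	       (unsigned long long)gen6_pack(addr));
	return 0;
}
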
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index e0249722d25f..f953c96efc86 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -159,7 +159,7 @@ static void hangcheck_fire(unsigned long data)
159 if (hangcheck_dump_tasks) { 159 if (hangcheck_dump_tasks) {
160 printk(KERN_CRIT "Hangcheck: Task state:\n"); 160 printk(KERN_CRIT "Hangcheck: Task state:\n");
161#ifdef CONFIG_MAGIC_SYSRQ 161#ifdef CONFIG_MAGIC_SYSRQ
162 handle_sysrq('t', NULL); 162 handle_sysrq('t');
163#endif /* CONFIG_MAGIC_SYSRQ */ 163#endif /* CONFIG_MAGIC_SYSRQ */
164 } 164 }
165 if (hangcheck_reboot) { 165 if (hangcheck_reboot) {
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index fa27d1676ee5..3afd62e856eb 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -651,7 +651,7 @@ int hvc_poll(struct hvc_struct *hp)
651 if (sysrq_pressed) 651 if (sysrq_pressed)
652 continue; 652 continue;
653 } else if (sysrq_pressed) { 653 } else if (sysrq_pressed) {
654 handle_sysrq(buf[i], tty); 654 handle_sysrq(buf[i]);
655 sysrq_pressed = 0; 655 sysrq_pressed = 0;
656 continue; 656 continue;
657 } 657 }
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 1f4b6de65a2d..a2bc885ce60a 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -403,7 +403,7 @@ static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
403 hp->sysrq = 1; 403 hp->sysrq = 1;
404 continue; 404 continue;
405 } else if (hp->sysrq) { 405 } else if (hp->sysrq) {
406 handle_sysrq(c, hp->tty); 406 handle_sysrq(c);
407 hp->sysrq = 0; 407 hp->sysrq = 0;
408 continue; 408 continue;
409 } 409 }
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 1acdb2509511..a3f5e381e746 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -387,7 +387,7 @@ static int n2rng_init_control(struct n2rng *np)
387 387
388static int n2rng_data_read(struct hwrng *rng, u32 *data) 388static int n2rng_data_read(struct hwrng *rng, u32 *data)
389{ 389{
390 struct n2rng *np = rng->priv; 390 struct n2rng *np = (struct n2rng *) rng->priv;
391 unsigned long ra = __pa(&np->test_data); 391 unsigned long ra = __pa(&np->test_data);
392 int len; 392 int len;
393 393
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 07f3ea38b582..d4b71e8d0d23 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -1650,7 +1650,7 @@ ip2_close( PTTY tty, struct file *pFile )
1650 /* disable DSS reporting */ 1650 /* disable DSS reporting */
1651 i2QueueCommands(PTYPE_INLINE, pCh, 100, 4, 1651 i2QueueCommands(PTYPE_INLINE, pCh, 100, 4,
1652 CMD_DCD_NREP, CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP); 1652 CMD_DCD_NREP, CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP);
1653 if ( !tty || (tty->termios->c_cflag & HUPCL) ) { 1653 if (tty->termios->c_cflag & HUPCL) {
1654 i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN); 1654 i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN);
1655 pCh->dataSetOut &= ~(I2_DTR | I2_RTS); 1655 pCh->dataSetOut &= ~(I2_DTR | I2_RTS);
1656 i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25)); 1656 i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25));
@@ -2930,6 +2930,8 @@ ip2_ipl_ioctl (struct file *pFile, UINT cmd, ULONG arg )
2930 if ( pCh ) 2930 if ( pCh )
2931 { 2931 {
2932 rc = copy_to_user(argp, pCh, sizeof(i2ChanStr)); 2932 rc = copy_to_user(argp, pCh, sizeof(i2ChanStr));
2933 if (rc)
2934 rc = -EFAULT;
2933 } else { 2935 } else {
2934 rc = -ENODEV; 2936 rc = -ENODEV;
2935 } 2937 }
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 3822b4f49c84..7bd7c45b53ef 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -305,6 +305,9 @@ static int num_force_kipmid;
305#ifdef CONFIG_PCI 305#ifdef CONFIG_PCI
306static int pci_registered; 306static int pci_registered;
307#endif 307#endif
308#ifdef CONFIG_ACPI
309static int pnp_registered;
310#endif
308#ifdef CONFIG_PPC_OF 311#ifdef CONFIG_PPC_OF
309static int of_registered; 312static int of_registered;
310#endif 313#endif
@@ -2126,7 +2129,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2126{ 2129{
2127 struct acpi_device *acpi_dev; 2130 struct acpi_device *acpi_dev;
2128 struct smi_info *info; 2131 struct smi_info *info;
2129 struct resource *res; 2132 struct resource *res, *res_second;
2130 acpi_handle handle; 2133 acpi_handle handle;
2131 acpi_status status; 2134 acpi_status status;
2132 unsigned long long tmp; 2135 unsigned long long tmp;
@@ -2182,13 +2185,13 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2182 info->io.addr_data = res->start; 2185 info->io.addr_data = res->start;
2183 2186
2184 info->io.regspacing = DEFAULT_REGSPACING; 2187 info->io.regspacing = DEFAULT_REGSPACING;
2185 res = pnp_get_resource(dev, 2188 res_second = pnp_get_resource(dev,
2186 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? 2189 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
2187 IORESOURCE_IO : IORESOURCE_MEM, 2190 IORESOURCE_IO : IORESOURCE_MEM,
2188 1); 2191 1);
2189 if (res) { 2192 if (res_second) {
2190 if (res->start > info->io.addr_data) 2193 if (res_second->start > info->io.addr_data)
2191 info->io.regspacing = res->start - info->io.addr_data; 2194 info->io.regspacing = res_second->start - info->io.addr_data;
2192 } 2195 }
2193 info->io.regsize = DEFAULT_REGSPACING; 2196 info->io.regsize = DEFAULT_REGSPACING;
2194 info->io.regshift = 0; 2197 info->io.regshift = 0;
@@ -3359,6 +3362,7 @@ static __devinit int init_ipmi_si(void)
3359 3362
3360#ifdef CONFIG_ACPI 3363#ifdef CONFIG_ACPI
3361 pnp_register_driver(&ipmi_pnp_driver); 3364 pnp_register_driver(&ipmi_pnp_driver);
3365 pnp_registered = 1;
3362#endif 3366#endif
3363 3367
3364#ifdef CONFIG_DMI 3368#ifdef CONFIG_DMI
@@ -3526,7 +3530,8 @@ static __exit void cleanup_ipmi_si(void)
3526 pci_unregister_driver(&ipmi_pci_driver); 3530 pci_unregister_driver(&ipmi_pci_driver);
3527#endif 3531#endif
3528#ifdef CONFIG_ACPI 3532#ifdef CONFIG_ACPI
3529 pnp_unregister_driver(&ipmi_pnp_driver); 3533 if (pnp_registered)
3534 pnp_unregister_driver(&ipmi_pnp_driver);
3530#endif 3535#endif
3531 3536
3532#ifdef CONFIG_PPC_OF 3537#ifdef CONFIG_PPC_OF
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index a398ecdbd758..1f528fad3516 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -788,10 +788,11 @@ static const struct file_operations zero_fops = {
788/* 788/*
789 * capabilities for /dev/zero 789 * capabilities for /dev/zero
790 * - permits private mappings, "copies" are taken of the source of zeros 790 * - permits private mappings, "copies" are taken of the source of zeros
791 * - no writeback happens
791 */ 792 */
792static struct backing_dev_info zero_bdi = { 793static struct backing_dev_info zero_bdi = {
793 .name = "char/mem", 794 .name = "char/mem",
794 .capabilities = BDI_CAP_MAP_COPY, 795 .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
795}; 796};
796 797
797static const struct file_operations full_fops = { 798static const struct file_operations full_fops = {
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 79c3bc69165a..7c79d243acc9 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -1244,6 +1244,7 @@ static int set_config(struct tty_struct *tty, struct r_port *info,
1244 } 1244 }
1245 info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK)); 1245 info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK));
1246 configure_r_port(tty, info, NULL); 1246 configure_r_port(tty, info, NULL);
1247 mutex_unlock(&info->port.mutex);
1247 return 0; 1248 return 0;
1248 } 1249 }
1249 1250
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index fef80cfcab5c..e63b830c86cc 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -691,8 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
691 if (info->port.count == 1) { 691 if (info->port.count == 1) {
692 /* 1st open on this device, init hardware */ 692 /* 1st open on this device, init hardware */
693 retval = startup(info); 693 retval = startup(info);
694 if (retval < 0) 694 if (retval < 0) {
695 mutex_unlock(&info->port.mutex);
695 goto cleanup; 696 goto cleanup;
697 }
696 } 698 }
697 mutex_unlock(&info->port.mutex); 699 mutex_unlock(&info->port.mutex);
698 retval = block_til_ready(tty, filp, info); 700 retval = block_til_ready(tty, filp, info);
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 878ac0c2cc68..ef31bb81e843 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -18,7 +18,6 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/tty.h>
22#include <linux/mount.h> 21#include <linux/mount.h>
23#include <linux/kdev_t.h> 22#include <linux/kdev_t.h>
24#include <linux/major.h> 23#include <linux/major.h>
@@ -76,7 +75,7 @@ static int __init sysrq_always_enabled_setup(char *str)
76__setup("sysrq_always_enabled", sysrq_always_enabled_setup); 75__setup("sysrq_always_enabled", sysrq_always_enabled_setup);
77 76
78 77
79static void sysrq_handle_loglevel(int key, struct tty_struct *tty) 78static void sysrq_handle_loglevel(int key)
80{ 79{
81 int i; 80 int i;
82 81
@@ -93,7 +92,7 @@ static struct sysrq_key_op sysrq_loglevel_op = {
93}; 92};
94 93
95#ifdef CONFIG_VT 94#ifdef CONFIG_VT
96static void sysrq_handle_SAK(int key, struct tty_struct *tty) 95static void sysrq_handle_SAK(int key)
97{ 96{
98 struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work; 97 struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
99 schedule_work(SAK_work); 98 schedule_work(SAK_work);
@@ -109,7 +108,7 @@ static struct sysrq_key_op sysrq_SAK_op = {
109#endif 108#endif
110 109
111#ifdef CONFIG_VT 110#ifdef CONFIG_VT
112static void sysrq_handle_unraw(int key, struct tty_struct *tty) 111static void sysrq_handle_unraw(int key)
113{ 112{
114 struct kbd_struct *kbd = &kbd_table[fg_console]; 113 struct kbd_struct *kbd = &kbd_table[fg_console];
115 114
@@ -126,7 +125,7 @@ static struct sysrq_key_op sysrq_unraw_op = {
126#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL) 125#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
127#endif /* CONFIG_VT */ 126#endif /* CONFIG_VT */
128 127
129static void sysrq_handle_crash(int key, struct tty_struct *tty) 128static void sysrq_handle_crash(int key)
130{ 129{
131 char *killer = NULL; 130 char *killer = NULL;
132 131
@@ -141,7 +140,7 @@ static struct sysrq_key_op sysrq_crash_op = {
141 .enable_mask = SYSRQ_ENABLE_DUMP, 140 .enable_mask = SYSRQ_ENABLE_DUMP,
142}; 141};
143 142
144static void sysrq_handle_reboot(int key, struct tty_struct *tty) 143static void sysrq_handle_reboot(int key)
145{ 144{
146 lockdep_off(); 145 lockdep_off();
147 local_irq_enable(); 146 local_irq_enable();
@@ -154,7 +153,7 @@ static struct sysrq_key_op sysrq_reboot_op = {
154 .enable_mask = SYSRQ_ENABLE_BOOT, 153 .enable_mask = SYSRQ_ENABLE_BOOT,
155}; 154};
156 155
157static void sysrq_handle_sync(int key, struct tty_struct *tty) 156static void sysrq_handle_sync(int key)
158{ 157{
159 emergency_sync(); 158 emergency_sync();
160} 159}
@@ -165,7 +164,7 @@ static struct sysrq_key_op sysrq_sync_op = {
165 .enable_mask = SYSRQ_ENABLE_SYNC, 164 .enable_mask = SYSRQ_ENABLE_SYNC,
166}; 165};
167 166
168static void sysrq_handle_show_timers(int key, struct tty_struct *tty) 167static void sysrq_handle_show_timers(int key)
169{ 168{
170 sysrq_timer_list_show(); 169 sysrq_timer_list_show();
171} 170}
@@ -176,7 +175,7 @@ static struct sysrq_key_op sysrq_show_timers_op = {
176 .action_msg = "Show clockevent devices & pending hrtimers (no others)", 175 .action_msg = "Show clockevent devices & pending hrtimers (no others)",
177}; 176};
178 177
179static void sysrq_handle_mountro(int key, struct tty_struct *tty) 178static void sysrq_handle_mountro(int key)
180{ 179{
181 emergency_remount(); 180 emergency_remount();
182} 181}
@@ -188,7 +187,7 @@ static struct sysrq_key_op sysrq_mountro_op = {
188}; 187};
189 188
190#ifdef CONFIG_LOCKDEP 189#ifdef CONFIG_LOCKDEP
191static void sysrq_handle_showlocks(int key, struct tty_struct *tty) 190static void sysrq_handle_showlocks(int key)
192{ 191{
193 debug_show_all_locks(); 192 debug_show_all_locks();
194} 193}
@@ -226,7 +225,7 @@ static void sysrq_showregs_othercpus(struct work_struct *dummy)
226 225
227static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus); 226static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
228 227
229static void sysrq_handle_showallcpus(int key, struct tty_struct *tty) 228static void sysrq_handle_showallcpus(int key)
230{ 229{
231 /* 230 /*
232 * Fall back to the workqueue based printing if the 231 * Fall back to the workqueue based printing if the
@@ -252,7 +251,7 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
252}; 251};
253#endif 252#endif
254 253
255static void sysrq_handle_showregs(int key, struct tty_struct *tty) 254static void sysrq_handle_showregs(int key)
256{ 255{
257 struct pt_regs *regs = get_irq_regs(); 256 struct pt_regs *regs = get_irq_regs();
258 if (regs) 257 if (regs)
@@ -266,7 +265,7 @@ static struct sysrq_key_op sysrq_showregs_op = {
266 .enable_mask = SYSRQ_ENABLE_DUMP, 265 .enable_mask = SYSRQ_ENABLE_DUMP,
267}; 266};
268 267
269static void sysrq_handle_showstate(int key, struct tty_struct *tty) 268static void sysrq_handle_showstate(int key)
270{ 269{
271 show_state(); 270 show_state();
272} 271}
@@ -277,7 +276,7 @@ static struct sysrq_key_op sysrq_showstate_op = {
277 .enable_mask = SYSRQ_ENABLE_DUMP, 276 .enable_mask = SYSRQ_ENABLE_DUMP,
278}; 277};
279 278
280static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty) 279static void sysrq_handle_showstate_blocked(int key)
281{ 280{
282 show_state_filter(TASK_UNINTERRUPTIBLE); 281 show_state_filter(TASK_UNINTERRUPTIBLE);
283} 282}
@@ -291,7 +290,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
291#ifdef CONFIG_TRACING 290#ifdef CONFIG_TRACING
292#include <linux/ftrace.h> 291#include <linux/ftrace.h>
293 292
294static void sysrq_ftrace_dump(int key, struct tty_struct *tty) 293static void sysrq_ftrace_dump(int key)
295{ 294{
296 ftrace_dump(DUMP_ALL); 295 ftrace_dump(DUMP_ALL);
297} 296}
@@ -305,7 +304,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
305#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL) 304#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
306#endif 305#endif
307 306
308static void sysrq_handle_showmem(int key, struct tty_struct *tty) 307static void sysrq_handle_showmem(int key)
309{ 308{
310 show_mem(); 309 show_mem();
311} 310}
@@ -330,7 +329,7 @@ static void send_sig_all(int sig)
330 } 329 }
331} 330}
332 331
333static void sysrq_handle_term(int key, struct tty_struct *tty) 332static void sysrq_handle_term(int key)
334{ 333{
335 send_sig_all(SIGTERM); 334 send_sig_all(SIGTERM);
336 console_loglevel = 8; 335 console_loglevel = 8;
@@ -349,7 +348,7 @@ static void moom_callback(struct work_struct *ignored)
349 348
350static DECLARE_WORK(moom_work, moom_callback); 349static DECLARE_WORK(moom_work, moom_callback);
351 350
352static void sysrq_handle_moom(int key, struct tty_struct *tty) 351static void sysrq_handle_moom(int key)
353{ 352{
354 schedule_work(&moom_work); 353 schedule_work(&moom_work);
355} 354}
@@ -361,7 +360,7 @@ static struct sysrq_key_op sysrq_moom_op = {
361}; 360};
362 361
363#ifdef CONFIG_BLOCK 362#ifdef CONFIG_BLOCK
364static void sysrq_handle_thaw(int key, struct tty_struct *tty) 363static void sysrq_handle_thaw(int key)
365{ 364{
366 emergency_thaw_all(); 365 emergency_thaw_all();
367} 366}
@@ -373,7 +372,7 @@ static struct sysrq_key_op sysrq_thaw_op = {
373}; 372};
374#endif 373#endif
375 374
376static void sysrq_handle_kill(int key, struct tty_struct *tty) 375static void sysrq_handle_kill(int key)
377{ 376{
378 send_sig_all(SIGKILL); 377 send_sig_all(SIGKILL);
379 console_loglevel = 8; 378 console_loglevel = 8;
@@ -385,7 +384,7 @@ static struct sysrq_key_op sysrq_kill_op = {
385 .enable_mask = SYSRQ_ENABLE_SIGNAL, 384 .enable_mask = SYSRQ_ENABLE_SIGNAL,
386}; 385};
387 386
388static void sysrq_handle_unrt(int key, struct tty_struct *tty) 387static void sysrq_handle_unrt(int key)
389{ 388{
390 normalize_rt_tasks(); 389 normalize_rt_tasks();
391} 390}
@@ -493,7 +492,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
493 sysrq_key_table[i] = op_p; 492 sysrq_key_table[i] = op_p;
494} 493}
495 494
496void __handle_sysrq(int key, struct tty_struct *tty, int check_mask) 495void __handle_sysrq(int key, bool check_mask)
497{ 496{
498 struct sysrq_key_op *op_p; 497 struct sysrq_key_op *op_p;
499 int orig_log_level; 498 int orig_log_level;
@@ -520,7 +519,7 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
520 if (!check_mask || sysrq_on_mask(op_p->enable_mask)) { 519 if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
521 printk("%s\n", op_p->action_msg); 520 printk("%s\n", op_p->action_msg);
522 console_loglevel = orig_log_level; 521 console_loglevel = orig_log_level;
523 op_p->handler(key, tty); 522 op_p->handler(key);
524 } else { 523 } else {
525 printk("This sysrq operation is disabled.\n"); 524 printk("This sysrq operation is disabled.\n");
526 } 525 }
@@ -545,10 +544,10 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
545 spin_unlock_irqrestore(&sysrq_key_table_lock, flags); 544 spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
546} 545}
547 546
548void handle_sysrq(int key, struct tty_struct *tty) 547void handle_sysrq(int key)
549{ 548{
550 if (sysrq_on()) 549 if (sysrq_on())
551 __handle_sysrq(key, tty, 1); 550 __handle_sysrq(key, true);
552} 551}
553EXPORT_SYMBOL(handle_sysrq); 552EXPORT_SYMBOL(handle_sysrq);
554 553
@@ -597,7 +596,7 @@ static bool sysrq_filter(struct input_handle *handle, unsigned int type,
597 596
598 default: 597 default:
599 if (sysrq_down && value && value != 2) 598 if (sysrq_down && value && value != 2)
600 __handle_sysrq(sysrq_xlate[code], NULL, 1); 599 __handle_sysrq(sysrq_xlate[code], true);
601 break; 600 break;
602 } 601 }
603 602
@@ -765,7 +764,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
765 764
766 if (get_user(c, buf)) 765 if (get_user(c, buf))
767 return -EFAULT; 766 return -EFAULT;
768 __handle_sysrq(c, NULL, 0); 767 __handle_sysrq(c, false);
769 } 768 }
770 769
771 return count; 770 return count;
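
Aside: with the tty argument dropped, a sysrq handler is just a function of the key character, picked out of a table. A minimal userspace sketch of that key-to-handler dispatch (sysrq_key_table and handle_sysrq() in the patch above are the real thing; the names below are invented):

#include <stdio.h>

typedef void (*key_handler)(int key);

static void handle_sync(int key)   { printf("'%c': emergency sync\n", key); }
static void handle_reboot(int key) { printf("'%c': reboot\n", key); }

struct key_op {
	int key;
	key_handler handler;
	const char *action_msg;
};

static const struct key_op key_table[] = {
	{ 's', handle_sync,   "Emergency Sync" },
	{ 'b', handle_reboot, "Resetting" },
};

static void dispatch(int key)
{
	for (size_t i = 0; i < sizeof(key_table) / sizeof(key_table[0]); i++) {
		if (key_table[i].key == key) {
			printf("%s\n", key_table[i].action_msg);
			key_table[i].handler(key);	/* one argument, like the new handler signature */
			return;
		}
	}
	printf("no handler for '%c'\n", key);
}

int main(void)
{
	dispatch('s');
	dispatch('x');
	return 0;
}
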
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 949067a0bd47..613c852ee0fe 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -355,7 +355,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
355 if (*stp == '\0') 355 if (*stp == '\0')
356 stp = NULL; 356 stp = NULL;
357 357
358 if (tty_line >= 0 && tty_line <= p->num && p->ops && 358 if (tty_line >= 0 && tty_line < p->num && p->ops &&
359 p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) { 359 p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
360 res = tty_driver_kref_get(p); 360 res = tty_driver_kref_get(p);
361 *line = tty_line; 361 *line = tty_line;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 942a9826bd23..c810481a5bc2 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -596,6 +596,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
596 ssize_t ret; 596 ssize_t ret;
597 bool nonblock; 597 bool nonblock;
598 598
599 /* Userspace could be out to fool us */
600 if (!count)
601 return 0;
602
599 port = filp->private_data; 603 port = filp->private_data;
600 604
601 nonblock = filp->f_flags & O_NONBLOCK; 605 nonblock = filp->f_flags & O_NONBLOCK;
@@ -642,7 +646,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
642 poll_wait(filp, &port->waitqueue, wait); 646 poll_wait(filp, &port->waitqueue, wait);
643 647
644 ret = 0; 648 ret = 0;
645 if (port->inbuf) 649 if (!will_read_block(port))
646 ret |= POLLIN | POLLRDNORM; 650 ret |= POLLIN | POLLRDNORM;
647 if (!will_write_block(port)) 651 if (!will_write_block(port))
648 ret |= POLLOUT; 652 ret |= POLLOUT;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 50590c7f2c01..281aada7b4a1 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -906,22 +906,16 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
906 * bottom of buffer 906 * bottom of buffer
907 */ 907 */
908 old_origin += (old_rows - new_rows) * old_row_size; 908 old_origin += (old_rows - new_rows) * old_row_size;
909 end = vc->vc_scr_end;
910 } else { 909 } else {
911 /* 910 /*
912 * Cursor is in no man's land, copy 1/2 screenful 911 * Cursor is in no man's land, copy 1/2 screenful
913 * from the top and bottom of cursor position 912 * from the top and bottom of cursor position
914 */ 913 */
915 old_origin += (vc->vc_y - new_rows/2) * old_row_size; 914 old_origin += (vc->vc_y - new_rows/2) * old_row_size;
916 end = old_origin + (old_row_size * new_rows);
917 } 915 }
918 } else 916 }
919 /* 917
920 * Cursor near the top, copy contents from the top of buffer 918 end = old_origin + old_row_size * min(old_rows, new_rows);
921 */
922 end = (old_rows > new_rows) ? old_origin +
923 (old_row_size * new_rows) :
924 vc->vc_scr_end;
925 919
926 update_attr(vc); 920 update_attr(vc);
927 921
@@ -3075,8 +3069,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
3075 3069
3076 old_was_color = vc->vc_can_do_color; 3070 old_was_color = vc->vc_can_do_color;
3077 vc->vc_sw->con_deinit(vc); 3071 vc->vc_sw->con_deinit(vc);
3078 if (!vc->vc_origin) 3072 vc->vc_origin = (unsigned long)vc->vc_screenbuf;
3079 vc->vc_origin = (unsigned long)vc->vc_screenbuf;
3080 visual_init(vc, i, 0); 3073 visual_init(vc, i, 0);
3081 set_origin(vc); 3074 set_origin(vc);
3082 update_attr(vc); 3075 update_attr(vc);
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 2bbeaaea46e9..38df8c19e74c 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
533 case KIOCSOUND: 533 case KIOCSOUND:
534 if (!perm) 534 if (!perm)
535 goto eperm; 535 goto eperm;
536 /* FIXME: This is an old broken API but we need to keep it 536 /*
537 supported and somehow separate the historic advertised 537 * The use of PIT_TICK_RATE is historic, it used to be
538 tick rate from any real one */ 538 * the platform-dependent CLOCK_TICK_RATE between 2.6.12
539 * and 2.6.36, which was a minor but unfortunate ABI
540 * change.
541 */
539 if (arg) 542 if (arg)
540 arg = CLOCK_TICK_RATE / arg; 543 arg = PIT_TICK_RATE / arg;
541 kd_mksound(arg, 0); 544 kd_mksound(arg, 0);
542 break; 545 break;
543 546
@@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
553 */ 556 */
554 ticks = HZ * ((arg >> 16) & 0xffff) / 1000; 557 ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
555 count = ticks ? (arg & 0xffff) : 0; 558 count = ticks ? (arg & 0xffff) : 0;
556 /* FIXME: This is an old broken API but we need to keep it
557 supported and somehow separate the historic advertised
558 tick rate from any real one */
559 if (count) 559 if (count)
560 count = CLOCK_TICK_RATE / count; 560 count = PIT_TICK_RATE / count;
561 kd_mksound(count, ticks); 561 kd_mksound(count, ticks);
562 break; 562 break;
563 } 563 }
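
Aside: the KIOCSOUND/KDMKTONE paths above turn a frequency in Hz into a PIT divisor with PIT_TICK_RATE / freq. A quick standalone check of that arithmetic (1193182 Hz is the i8253 PIT input clock; 440 Hz is just an example tone):

#include <stdio.h>

#define PIT_TICK_RATE 1193182UL	/* i8253 input clock in Hz */

int main(void)
{
	unsigned long freq = 440;			/* requested tone, Hz */
	unsigned long count = PIT_TICK_RATE / freq;	/* divisor programmed into the PIT */

	printf("%lu Hz -> divisor %lu (actual %.1f Hz)\n",
	       freq, count, (double)PIT_TICK_RATE / count);
	return 0;
}
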
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index c2408bbe9c2e..f508690eb958 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -80,7 +80,7 @@
80 * Limiting Performance Impact 80 * Limiting Performance Impact
81 * --------------------------- 81 * ---------------------------
82 * C states, especially those with large exit latencies, can have a real 82 * C states, especially those with large exit latencies, can have a real
83 * noticable impact on workloads, which is not acceptable for most sysadmins, 83 * noticeable impact on workloads, which is not acceptable for most sysadmins,
84 * and in addition, less performance has a power price of its own. 84 * and in addition, less performance has a power price of its own.
85 * 85 *
86 * As a general rule of thumb, menu assumes that the following heuristic 86 * As a general rule of thumb, menu assumes that the following heuristic
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 8661c84a105d..b98c67664ae7 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -39,6 +39,10 @@ static DEFINE_SPINLOCK(dca_lock);
39 39
40static LIST_HEAD(dca_domains); 40static LIST_HEAD(dca_domains);
41 41
42static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
43
44static int dca_providers_blocked;
45
42static struct pci_bus *dca_pci_rc_from_dev(struct device *dev) 46static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
43{ 47{
44 struct pci_dev *pdev = to_pci_dev(dev); 48 struct pci_dev *pdev = to_pci_dev(dev);
@@ -70,6 +74,60 @@ static void dca_free_domain(struct dca_domain *domain)
70 kfree(domain); 74 kfree(domain);
71} 75}
72 76
77static int dca_provider_ioat_ver_3_0(struct device *dev)
78{
79 struct pci_dev *pdev = to_pci_dev(dev);
80
81 return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
82 ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
83 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
84 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
85 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
86 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
87 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
88 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
89 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
90}
91
92static void unregister_dca_providers(void)
93{
94 struct dca_provider *dca, *_dca;
95 struct list_head unregistered_providers;
96 struct dca_domain *domain;
97 unsigned long flags;
98
99 blocking_notifier_call_chain(&dca_provider_chain,
100 DCA_PROVIDER_REMOVE, NULL);
101
102 INIT_LIST_HEAD(&unregistered_providers);
103
104 spin_lock_irqsave(&dca_lock, flags);
105
106 if (list_empty(&dca_domains)) {
107 spin_unlock_irqrestore(&dca_lock, flags);
108 return;
109 }
110
111 /* at this point only one domain in the list is expected */
112 domain = list_first_entry(&dca_domains, struct dca_domain, node);
113 if (!domain)
114 return;
115
116 list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
117 list_del(&dca->node);
118 list_add(&dca->node, &unregistered_providers);
119 }
120
121 dca_free_domain(domain);
122
123 spin_unlock_irqrestore(&dca_lock, flags);
124
125 list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
126 dca_sysfs_remove_provider(dca);
127 list_del(&dca->node);
128 }
129}
130
73static struct dca_domain *dca_find_domain(struct pci_bus *rc) 131static struct dca_domain *dca_find_domain(struct pci_bus *rc)
74{ 132{
75 struct dca_domain *domain; 133 struct dca_domain *domain;
@@ -90,9 +148,13 @@ static struct dca_domain *dca_get_domain(struct device *dev)
90 domain = dca_find_domain(rc); 148 domain = dca_find_domain(rc);
91 149
92 if (!domain) { 150 if (!domain) {
93 domain = dca_allocate_domain(rc); 151 if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
94 if (domain) 152 dca_providers_blocked = 1;
95 list_add(&domain->node, &dca_domains); 153 } else {
154 domain = dca_allocate_domain(rc);
155 if (domain)
156 list_add(&domain->node, &dca_domains);
157 }
96 } 158 }
97 159
98 return domain; 160 return domain;
@@ -293,8 +355,6 @@ void free_dca_provider(struct dca_provider *dca)
293} 355}
294EXPORT_SYMBOL_GPL(free_dca_provider); 356EXPORT_SYMBOL_GPL(free_dca_provider);
295 357
296static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
297
298/** 358/**
299 * register_dca_provider - register a dca provider 359 * register_dca_provider - register a dca provider
300 * @dca - struct created by alloc_dca_provider() 360 * @dca - struct created by alloc_dca_provider()
@@ -306,6 +366,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
306 unsigned long flags; 366 unsigned long flags;
307 struct dca_domain *domain; 367 struct dca_domain *domain;
308 368
369 spin_lock_irqsave(&dca_lock, flags);
370 if (dca_providers_blocked) {
371 spin_unlock_irqrestore(&dca_lock, flags);
372 return -ENODEV;
373 }
374 spin_unlock_irqrestore(&dca_lock, flags);
375
309 err = dca_sysfs_add_provider(dca, dev); 376 err = dca_sysfs_add_provider(dca, dev);
310 if (err) 377 if (err)
311 return err; 378 return err;
@@ -313,7 +380,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
313 spin_lock_irqsave(&dca_lock, flags); 380 spin_lock_irqsave(&dca_lock, flags);
314 domain = dca_get_domain(dev); 381 domain = dca_get_domain(dev);
315 if (!domain) { 382 if (!domain) {
316 spin_unlock_irqrestore(&dca_lock, flags); 383 if (dca_providers_blocked) {
384 spin_unlock_irqrestore(&dca_lock, flags);
385 dca_sysfs_remove_provider(dca);
386 unregister_dca_providers();
387 } else {
388 spin_unlock_irqrestore(&dca_lock, flags);
389 }
317 return -ENODEV; 390 return -ENODEV;
318 } 391 }
319 list_add(&dca->node, &domain->dca_providers); 392 list_add(&dca->node, &domain->dca_providers);
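
Aside: unregister_dca_providers() above detaches every provider onto a private list while holding dca_lock and only tears them down after dropping it. A small userspace sketch of that "splice out under the lock, clean up outside it" idiom (hand-rolled singly linked list and a pthread mutex standing in for list_head/spinlock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct provider {
	int id;
	struct provider *next;
};

static struct provider *providers;	/* shared list */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void unregister_all(void)
{
	struct provider *victims, *p;

	/* detach the whole list while holding the lock ... */
	pthread_mutex_lock(&lock);
	victims = providers;
	providers = NULL;
	pthread_mutex_unlock(&lock);

	/* ... and do the (potentially slow) teardown outside it */
	while ((p = victims)) {
		victims = p->next;
		printf("removing provider %d\n", p->id);
		free(p);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct provider *p = malloc(sizeof(*p));
		p->id = i;
		p->next = providers;
		providers = p;
	}
	unregister_all();
	return 0;
}
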
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 216f9d383b5b..effd140fc042 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -879,7 +879,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
879 dma->device_issue_pending = ioat2_issue_pending; 879 dma->device_issue_pending = ioat2_issue_pending;
880 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; 880 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
881 dma->device_free_chan_resources = ioat2_free_chan_resources; 881 dma->device_free_chan_resources = ioat2_free_chan_resources;
882 dma->device_tx_status = ioat_tx_status; 882 dma->device_tx_status = ioat_dma_tx_status;
883 883
884 err = ioat_probe(device); 884 err = ioat_probe(device);
885 if (err) 885 if (err)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 86c5ae9fde34..411d5bf50fc4 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)
162 162
163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
164{ 164{
165 u32 val = (1 << (1 + (chan->idx * 16))); 165 u32 val = ~(1 << (chan->idx * 16));
166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); 166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
167 __raw_writel(val, XOR_INTR_CAUSE(chan)); 167 __raw_writel(val, XOR_INTR_CAUSE(chan));
168} 168}
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index fb64cf36ba61..eb6b54dbb806 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -580,7 +580,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
580 580
581 sh_chan = to_sh_chan(chan); 581 sh_chan = to_sh_chan(chan);
582 param = chan->private; 582 param = chan->private;
583 slave_addr = param->config->addr;
584 583
585 /* Someone calling slave DMA on a public channel? */ 584 /* Someone calling slave DMA on a public channel? */
586 if (!param || !sg_len) { 585 if (!param || !sg_len) {
@@ -589,6 +588,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
589 return NULL; 588 return NULL;
590 } 589 }
591 590
591 slave_addr = param->config->addr;
592
592 /* 593 /*
593 * if (param != NULL), this is a successfully requested slave channel, 594 * if (param != NULL), this is a successfully requested slave channel,
594 * therefore param->config != NULL too. 595 * therefore param->config != NULL too.
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 670239ab7511..e7d5d6b5dcf6 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2071,16 +2071,6 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
2071 amd64_handle_ce(mci, info); 2071 amd64_handle_ce(mci, info);
2072 else if (ecc_type == 1) 2072 else if (ecc_type == 1)
2073 amd64_handle_ue(mci, info); 2073 amd64_handle_ue(mci, info);
2074
2075 /*
2076 * If main error is CE then overflow must be CE. If main error is UE
2077 * then overflow is unknown. We'll call the overflow a CE - if
2078 * panic_on_ue is set then we're already panic'ed and won't arrive
2079 * here. Else, then apparently someone doesn't think that UE's are
2080 * catastrophic.
2081 */
2082 if (info->nbsh & K8_NBSH_OVERFLOW)
2083 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR " Error Overflow");
2084} 2074}
2085 2075
2086void amd64_decode_bus_error(int node_id, struct err_regs *regs) 2076void amd64_decode_bus_error(int node_id, struct err_regs *regs)
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 3630308e7b81..6b21e25f7a84 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -339,6 +339,9 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
339{ 339{
340 int status; 340 int status;
341 341
342 if (mci->op_state != OP_RUNNING_POLL)
343 return;
344
342 status = cancel_delayed_work(&mci->work); 345 status = cancel_delayed_work(&mci->work);
343 if (status == 0) { 346 if (status == 0) {
344 debugf0("%s() not canceled, flush the queue\n", 347 debugf0("%s() not canceled, flush the queue\n",
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index bae9351e9473..9014df6f605d 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -365,11 +365,10 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
365 365
366 pr_emerg("MC%d_STATUS: ", m->bank); 366 pr_emerg("MC%d_STATUS: ", m->bank);
367 367
368 pr_cont("%sorrected error, report: %s, MiscV: %svalid, " 368 pr_cont("%sorrected error, other errors lost: %s, "
369 "CPU context corrupt: %s", 369 "CPU context corrupt: %s",
370 ((m->status & MCI_STATUS_UC) ? "Unc" : "C"), 370 ((m->status & MCI_STATUS_UC) ? "Unc" : "C"),
371 ((m->status & MCI_STATUS_EN) ? "yes" : "no"), 371 ((m->status & MCI_STATUS_OVER) ? "yes" : "no"),
372 ((m->status & MCI_STATUS_MISCV) ? "" : "in"),
373 ((m->status & MCI_STATUS_PCC) ? "yes" : "no")); 372 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
374 373
375 /* do the two bits[14:13] together */ 374 /* do the two bits[14:13] together */
@@ -426,11 +425,15 @@ static struct notifier_block amd_mce_dec_nb = {
426static int __init mce_amd_init(void) 425static int __init mce_amd_init(void)
427{ 426{
428 /* 427 /*
429 * We can decode MCEs for Opteron and later CPUs: 428 * We can decode MCEs for K8, F10h and F11h CPUs:
430 */ 429 */
431 if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && 430 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
432 (boot_cpu_data.x86 >= 0xf)) 431 return 0;
433 atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb); 432
433 if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
434 return 0;
435
436 atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
434 437
435 return 0; 438 return 0;
436} 439}
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index e0187d16dd7c..0fd5b85a0f75 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1140,6 +1140,7 @@ static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
1140 ATTR_COUNTER(0), 1140 ATTR_COUNTER(0),
1141 ATTR_COUNTER(1), 1141 ATTR_COUNTER(1),
1142 ATTR_COUNTER(2), 1142 ATTR_COUNTER(2),
1143 { .attr = { .name = NULL } }
1143}; 1144};
1144 1145
1145static struct mcidev_sysfs_group i7core_udimm_counters = { 1146static struct mcidev_sysfs_group i7core_udimm_counters = {
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index ca7ca56661e0..b42a0bde8494 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -81,6 +81,10 @@ static int close_transaction(struct fw_transaction *transaction,
81 spin_lock_irqsave(&card->lock, flags); 81 spin_lock_irqsave(&card->lock, flags);
82 list_for_each_entry(t, &card->transaction_list, link) { 82 list_for_each_entry(t, &card->transaction_list, link) {
83 if (t == transaction) { 83 if (t == transaction) {
84 if (!del_timer(&t->split_timeout_timer)) {
85 spin_unlock_irqrestore(&card->lock, flags);
86 goto timed_out;
87 }
84 list_del_init(&t->link); 88 list_del_init(&t->link);
85 card->tlabel_mask &= ~(1ULL << t->tlabel); 89 card->tlabel_mask &= ~(1ULL << t->tlabel);
86 break; 90 break;
@@ -89,11 +93,11 @@ static int close_transaction(struct fw_transaction *transaction,
89 spin_unlock_irqrestore(&card->lock, flags); 93 spin_unlock_irqrestore(&card->lock, flags);
90 94
91 if (&t->link != &card->transaction_list) { 95 if (&t->link != &card->transaction_list) {
92 del_timer_sync(&t->split_timeout_timer);
93 t->callback(card, rcode, NULL, 0, t->callback_data); 96 t->callback(card, rcode, NULL, 0, t->callback_data);
94 return 0; 97 return 0;
95 } 98 }
96 99
100 timed_out:
97 return -ENOENT; 101 return -ENOENT;
98} 102}
99 103
@@ -921,6 +925,10 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
921 spin_lock_irqsave(&card->lock, flags); 925 spin_lock_irqsave(&card->lock, flags);
922 list_for_each_entry(t, &card->transaction_list, link) { 926 list_for_each_entry(t, &card->transaction_list, link) {
923 if (t->node_id == source && t->tlabel == tlabel) { 927 if (t->node_id == source && t->tlabel == tlabel) {
928 if (!del_timer(&t->split_timeout_timer)) {
929 spin_unlock_irqrestore(&card->lock, flags);
930 goto timed_out;
931 }
924 list_del_init(&t->link); 932 list_del_init(&t->link);
925 card->tlabel_mask &= ~(1ULL << t->tlabel); 933 card->tlabel_mask &= ~(1ULL << t->tlabel);
926 break; 934 break;
@@ -929,6 +937,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
929 spin_unlock_irqrestore(&card->lock, flags); 937 spin_unlock_irqrestore(&card->lock, flags);
930 938
931 if (&t->link == &card->transaction_list) { 939 if (&t->link == &card->transaction_list) {
940 timed_out:
932 fw_notify("Unsolicited response (source %x, tlabel %x)\n", 941 fw_notify("Unsolicited response (source %x, tlabel %x)\n",
933 source, tlabel); 942 source, tlabel);
934 return; 943 return;
@@ -963,8 +972,6 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
963 break; 972 break;
964 } 973 }
965 974
966 del_timer_sync(&t->split_timeout_timer);
967
968 /* 975 /*
969 * The response handler may be executed while the request handler 976 * The response handler may be executed while the request handler
970 * is still pending. Cancel the request handler. 977 * is still pending. Cancel the request handler.
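
Aside: the close_transaction()/fw_core_handle_response() hunks above only proceed when del_timer() actually deactivated the pending split-timeout timer; if it did not, the timeout path already owns the transaction and the other side backs off. A toy userspace version of that "whoever claims it first completes it" rule (a boolean plus a mutex standing in for the timer and card->lock):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct transaction {
	pthread_mutex_t lock;
	bool pending;		/* like a still-pending split-timeout timer */
};

/* returns true only for the caller that actually claimed the transaction */
static bool claim(struct transaction *t, const char *who)
{
	bool won;

	pthread_mutex_lock(&t->lock);
	won = t->pending;
	t->pending = false;
	pthread_mutex_unlock(&t->lock);

	printf("%s: %s\n", who,
	       won ? "completing transaction" : "already handled, backing off");
	return won;
}

int main(void)
{
	struct transaction t = { PTHREAD_MUTEX_INITIALIZER, true };

	claim(&t, "response handler");	/* wins, runs the callback */
	claim(&t, "timeout handler");	/* loses, must not touch it again */
	return 0;
}
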
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index da17d409a244..33f8421c71cc 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -579,7 +579,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
579 if (!peer) { 579 if (!peer) {
580 fw_notify("No peer for ARP packet from %016llx\n", 580 fw_notify("No peer for ARP packet from %016llx\n",
581 (unsigned long long)peer_guid); 581 (unsigned long long)peer_guid);
582 goto failed_proto; 582 goto no_peer;
583 } 583 }
584 584
585 /* 585 /*
@@ -656,7 +656,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
656 656
657 return 0; 657 return 0;
658 658
659 failed_proto: 659 no_peer:
660 net->stats.rx_errors++; 660 net->stats.rx_errors++;
661 net->stats.rx_dropped++; 661 net->stats.rx_dropped++;
662 662
@@ -664,7 +664,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
664 if (netif_queue_stopped(net)) 664 if (netif_queue_stopped(net))
665 netif_wake_queue(net); 665 netif_wake_queue(net);
666 666
667 return 0; 667 return -ENOENT;
668} 668}
669 669
670static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, 670static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
@@ -701,7 +701,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
701 fw_error("out of memory\n"); 701 fw_error("out of memory\n");
702 net->stats.rx_dropped++; 702 net->stats.rx_dropped++;
703 703
704 return -1; 704 return -ENOMEM;
705 } 705 }
706 skb_reserve(skb, (net->hard_header_len + 15) & ~15); 706 skb_reserve(skb, (net->hard_header_len + 15) & ~15);
707 memcpy(skb_put(skb, len), buf, len); 707 memcpy(skb_put(skb, len), buf, len);
@@ -726,8 +726,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
726 spin_lock_irqsave(&dev->lock, flags); 726 spin_lock_irqsave(&dev->lock, flags);
727 727
728 peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation); 728 peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
729 if (!peer) 729 if (!peer) {
730 goto bad_proto; 730 retval = -ENOENT;
731 goto fail;
732 }
731 733
732 pd = fwnet_pd_find(peer, datagram_label); 734 pd = fwnet_pd_find(peer, datagram_label);
733 if (pd == NULL) { 735 if (pd == NULL) {
@@ -741,7 +743,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
741 dg_size, buf, fg_off, len); 743 dg_size, buf, fg_off, len);
742 if (pd == NULL) { 744 if (pd == NULL) {
743 retval = -ENOMEM; 745 retval = -ENOMEM;
744 goto bad_proto; 746 goto fail;
745 } 747 }
746 peer->pdg_size++; 748 peer->pdg_size++;
747 } else { 749 } else {
@@ -755,9 +757,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
755 pd = fwnet_pd_new(net, peer, datagram_label, 757 pd = fwnet_pd_new(net, peer, datagram_label,
756 dg_size, buf, fg_off, len); 758 dg_size, buf, fg_off, len);
757 if (pd == NULL) { 759 if (pd == NULL) {
758 retval = -ENOMEM;
759 peer->pdg_size--; 760 peer->pdg_size--;
760 goto bad_proto; 761 retval = -ENOMEM;
762 goto fail;
761 } 763 }
762 } else { 764 } else {
763 if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) { 765 if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
@@ -768,7 +770,8 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
768 */ 770 */
769 fwnet_pd_delete(pd); 771 fwnet_pd_delete(pd);
770 peer->pdg_size--; 772 peer->pdg_size--;
771 goto bad_proto; 773 retval = -ENOMEM;
774 goto fail;
772 } 775 }
773 } 776 }
774 } /* new datagram or add to existing one */ 777 } /* new datagram or add to existing one */
@@ -794,14 +797,13 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
794 spin_unlock_irqrestore(&dev->lock, flags); 797 spin_unlock_irqrestore(&dev->lock, flags);
795 798
796 return 0; 799 return 0;
797 800 fail:
798 bad_proto:
799 spin_unlock_irqrestore(&dev->lock, flags); 801 spin_unlock_irqrestore(&dev->lock, flags);
800 802
801 if (netif_queue_stopped(net)) 803 if (netif_queue_stopped(net))
802 netif_wake_queue(net); 804 netif_wake_queue(net);
803 805
804 return 0; 806 return retval;
805} 807}
806 808
807static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, 809static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7f03540cabe8..1b05896648bc 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -263,6 +263,7 @@ static const struct {
263 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI}, 263 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
264 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 264 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
265 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 265 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
266 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
266 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, 267 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
267}; 268};
268 269
@@ -694,7 +695,15 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
694 log_ar_at_event('R', p.speed, p.header, evt); 695 log_ar_at_event('R', p.speed, p.header, evt);
695 696
696 /* 697 /*
697 * The OHCI bus reset handler synthesizes a phy packet with 698 * Several controllers, notably from NEC and VIA, forget to
699 * write ack_complete status at PHY packet reception.
700 */
701 if (evt == OHCI1394_evt_no_status &&
702 (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
703 p.ack = ACK_COMPLETE;
704
705 /*
706 * The OHCI bus reset handler synthesizes a PHY packet with
698 * the new generation number when a bus reset happens (see 707 * the new generation number when a bus reset happens (see
699 * section 8.4.2.3). This helps us determine when a request 708 * section 8.4.2.3). This helps us determine when a request
700 * was received and make sure we send the response in the same 709 * was received and make sure we send the response in the same
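Note: the new check above works around controllers that report OHCI1394_evt_no_status for incoming PHY packets. Expressed as a stand-alone predicate, a sketch that relies on the driver's existing OHCI1394_* constants (the helper name is illustrative):

static bool phy_packet_lacks_ack(u32 header0, int evt)
{
	/* the check matches a first quadlet whose low byte is the PHY tcode shifted into bits 7:4 */
	return evt == OHCI1394_evt_no_status &&
	       (header0 & 0xff) == (OHCI1394_phy_tcode << 4);
}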
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 9f76171717e5..bfae4b309791 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -450,7 +450,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
450 450
451 if (&orb->link != &lu->orb_list) { 451 if (&orb->link != &lu->orb_list) {
452 orb->callback(orb, &status); 452 orb->callback(orb, &status);
453 kref_put(&orb->kref, free_orb); 453 kref_put(&orb->kref, free_orb); /* orb callback reference */
454 } else { 454 } else {
455 fw_error("status write for unknown orb\n"); 455 fw_error("status write for unknown orb\n");
456 } 456 }
@@ -472,20 +472,28 @@ static void complete_transaction(struct fw_card *card, int rcode,
472 * So this callback only sets the rcode if it hasn't already 472 * So this callback only sets the rcode if it hasn't already
473 * been set and only does the cleanup if the transaction 473 * been set and only does the cleanup if the transaction
474 * failed and we didn't already get a status write. 474 * failed and we didn't already get a status write.
475 *
476 * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some
477 * OXUF936QSE firmwares occasionally respond after Split_Timeout and
478 * complete the ORB just fine. Note, we also get RCODE_CANCELLED
479 * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0.
475 */ 480 */
476 spin_lock_irqsave(&card->lock, flags); 481 spin_lock_irqsave(&card->lock, flags);
477 482
478 if (orb->rcode == -1) 483 if (orb->rcode == -1)
479 orb->rcode = rcode; 484 orb->rcode = rcode;
480 if (orb->rcode != RCODE_COMPLETE) { 485
486 if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) {
481 list_del(&orb->link); 487 list_del(&orb->link);
482 spin_unlock_irqrestore(&card->lock, flags); 488 spin_unlock_irqrestore(&card->lock, flags);
489
483 orb->callback(orb, NULL); 490 orb->callback(orb, NULL);
491 kref_put(&orb->kref, free_orb); /* orb callback reference */
484 } else { 492 } else {
485 spin_unlock_irqrestore(&card->lock, flags); 493 spin_unlock_irqrestore(&card->lock, flags);
486 } 494 }
487 495
488 kref_put(&orb->kref, free_orb); 496 kref_put(&orb->kref, free_orb); /* transaction callback reference */
489} 497}
490 498
491static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, 499static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
@@ -501,9 +509,8 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
501 list_add_tail(&orb->link, &lu->orb_list); 509 list_add_tail(&orb->link, &lu->orb_list);
502 spin_unlock_irqrestore(&device->card->lock, flags); 510 spin_unlock_irqrestore(&device->card->lock, flags);
503 511
504 /* Take a ref for the orb list and for the transaction callback. */ 512 kref_get(&orb->kref); /* transaction callback reference */
505 kref_get(&orb->kref); 513 kref_get(&orb->kref); /* orb callback reference */
506 kref_get(&orb->kref);
507 514
508 fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, 515 fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
509 node_id, generation, device->max_speed, offset, 516 node_id, generation, device->max_speed, offset,
@@ -525,11 +532,11 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
525 532
526 list_for_each_entry_safe(orb, next, &list, link) { 533 list_for_each_entry_safe(orb, next, &list, link) {
527 retval = 0; 534 retval = 0;
528 if (fw_cancel_transaction(device->card, &orb->t) == 0) 535 fw_cancel_transaction(device->card, &orb->t);
529 continue;
530 536
531 orb->rcode = RCODE_CANCELLED; 537 orb->rcode = RCODE_CANCELLED;
532 orb->callback(orb, NULL); 538 orb->callback(orb, NULL);
539 kref_put(&orb->kref, free_orb); /* orb callback reference */
533 } 540 }
534 541
535 return retval; 542 return retval;
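Note: the sbp2 hunks above annotate and rebalance the ORB reference counts: one kref_get() per outstanding user, and exactly one matching kref_put() on each completion path. A minimal sketch of that pairing, with illustrative types and callbacks (my_orb, my_send) rather than the driver's own:

#include <linux/kref.h>
#include <linux/slab.h>

struct my_orb {
	struct kref kref;
};

static void my_orb_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_orb, kref));
}

static void my_send(struct my_orb *orb)
{
	kref_get(&orb->kref);	/* transaction callback reference */
	kref_get(&orb->kref);	/* orb callback reference */
	/* ... queue the ORB and start the transaction ... */
}

static void my_transaction_done(struct my_orb *orb, bool ran_orb_callback)
{
	if (ran_orb_callback)
		kref_put(&orb->kref, my_orb_release);	/* orb callback reference */

	kref_put(&orb->kref, my_orb_release);		/* transaction callback reference */
}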
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index b42f42ca70c3..823559ab0e24 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -459,17 +459,33 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
459 return err; 459 return err;
460} 460}
461 461
462static int sx150x_init_hw(struct sx150x_chip *chip, 462static int sx150x_reset(struct sx150x_chip *chip)
463 struct sx150x_platform_data *pdata)
464{ 463{
465 int err = 0; 464 int err;
466 465
467 err = i2c_smbus_write_word_data(chip->client, 466 err = i2c_smbus_write_byte_data(chip->client,
468 chip->dev_cfg->reg_reset, 467 chip->dev_cfg->reg_reset,
469 0x3412); 468 0x12);
470 if (err < 0) 469 if (err < 0)
471 return err; 470 return err;
472 471
472 err = i2c_smbus_write_byte_data(chip->client,
473 chip->dev_cfg->reg_reset,
474 0x34);
475 return err;
476}
477
478static int sx150x_init_hw(struct sx150x_chip *chip,
479 struct sx150x_platform_data *pdata)
480{
481 int err = 0;
482
483 if (pdata->reset_during_probe) {
484 err = sx150x_reset(chip);
485 if (err < 0)
486 return err;
487 }
488
473 err = sx150x_i2c_write(chip->client, 489 err = sx150x_i2c_write(chip->client,
474 chip->dev_cfg->reg_misc, 490 chip->dev_cfg->reg_misc,
475 0x01); 491 0x01);
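Note: the sx150x hunk above replaces the single 16-bit 0x3412 word write with two separate byte writes of 0x12 then 0x34 to the reset register, run only when the platform data asks for a reset during probe. A standalone sketch of that sequence; the register offset below is a placeholder (the driver takes it from its per-device config), and only i2c_smbus_write_byte_data() is assumed from the I2C core:

#include <linux/i2c.h>

#define MY_REG_RESET	0x7d	/* placeholder register offset */

static int my_sx150x_reset(struct i2c_client *client)
{
	int err;

	err = i2c_smbus_write_byte_data(client, MY_REG_RESET, 0x12);
	if (err < 0)
		return err;

	return i2c_smbus_write_byte_data(client, MY_REG_RESET, 0x34);
}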
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 55d03ed05000..529a0dbe9fc6 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc);
 98 * user_data: A pointer to the data that is copied to the buffer. 98 * user_data: A pointer to the data that is copied to the buffer.
 99 * size: The number of bytes to copy. 99 * size: The number of bytes to copy.
100 */ 100 */
101extern int drm_buffer_copy_from_user(struct drm_buffer *buf, 101int drm_buffer_copy_from_user(struct drm_buffer *buf,
102 void __user *user_data, int size) 102 void __user *user_data, int size)
103{ 103{
104 int nr_pages = size / PAGE_SIZE + 1; 104 int nr_pages = size / PAGE_SIZE + 1;
105 int idx; 105 int idx;
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
163{ 163{
164 int idx = drm_buffer_index(buf); 164 int idx = drm_buffer_index(buf);
165 int page = drm_buffer_page(buf); 165 int page = drm_buffer_page(buf);
166 void *obj = 0; 166 void *obj = NULL;
167 167
168 if (idx + objsize <= PAGE_SIZE) { 168 if (idx + objsize <= PAGE_SIZE) {
169 obj = &buf->data[page][idx]; 169 obj = &buf->data[page][idx];
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7e31d4348340..dcbeb98f195a 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,6 +34,9 @@
34#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
35#include "drm_fb_helper.h" 35#include "drm_fb_helper.h"
36 36
37static bool drm_kms_helper_poll = true;
38module_param_named(poll, drm_kms_helper_poll, bool, 0600);
39
37static void drm_mode_validate_flag(struct drm_connector *connector, 40static void drm_mode_validate_flag(struct drm_connector *connector,
38 int flags) 41 int flags)
39{ 42{
@@ -99,8 +102,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
99 connector->status = connector_status_disconnected; 102 connector->status = connector_status_disconnected;
100 if (connector->funcs->force) 103 if (connector->funcs->force)
101 connector->funcs->force(connector); 104 connector->funcs->force(connector);
102 } else 105 } else {
103 connector->status = connector->funcs->detect(connector); 106 connector->status = connector->funcs->detect(connector, true);
107 drm_kms_helper_poll_enable(dev);
108 }
104 109
105 if (connector->status == connector_status_disconnected) { 110 if (connector->status == connector_status_disconnected) {
106 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", 111 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -110,11 +115,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
110 } 115 }
111 116
112 count = (*connector_funcs->get_modes)(connector); 117 count = (*connector_funcs->get_modes)(connector);
113 if (!count) { 118 if (count == 0 && connector->status == connector_status_connected)
114 count = drm_add_modes_noedid(connector, 1024, 768); 119 count = drm_add_modes_noedid(connector, 1024, 768);
115 if (!count) 120 if (count == 0)
116 return 0; 121 goto prune;
117 }
118 122
119 drm_mode_connector_list_update(connector); 123 drm_mode_connector_list_update(connector);
120 124
@@ -633,13 +637,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
633 mode_changed = true; 637 mode_changed = true;
634 638
635 if (mode_changed) { 639 if (mode_changed) {
636 old_fb = set->crtc->fb;
637 set->crtc->fb = set->fb;
638 set->crtc->enabled = (set->mode != NULL); 640 set->crtc->enabled = (set->mode != NULL);
639 if (set->mode != NULL) { 641 if (set->mode != NULL) {
640 DRM_DEBUG_KMS("attempting to set mode from" 642 DRM_DEBUG_KMS("attempting to set mode from"
641 " userspace\n"); 643 " userspace\n");
642 drm_mode_debug_printmodeline(set->mode); 644 drm_mode_debug_printmodeline(set->mode);
645 old_fb = set->crtc->fb;
646 set->crtc->fb = set->fb;
643 if (!drm_crtc_helper_set_mode(set->crtc, set->mode, 647 if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
644 set->x, set->y, 648 set->x, set->y,
645 old_fb)) { 649 old_fb)) {
@@ -840,6 +844,9 @@ static void output_poll_execute(struct work_struct *work)
840 enum drm_connector_status old_status, status; 844 enum drm_connector_status old_status, status;
841 bool repoll = false, changed = false; 845 bool repoll = false, changed = false;
842 846
847 if (!drm_kms_helper_poll)
848 return;
849
843 mutex_lock(&dev->mode_config.mutex); 850 mutex_lock(&dev->mode_config.mutex);
844 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 851 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
845 852
@@ -859,7 +866,7 @@ static void output_poll_execute(struct work_struct *work)
859 !(connector->polled & DRM_CONNECTOR_POLL_HPD)) 866 !(connector->polled & DRM_CONNECTOR_POLL_HPD))
860 continue; 867 continue;
861 868
862 status = connector->funcs->detect(connector); 869 status = connector->funcs->detect(connector, false);
863 if (old_status != status) 870 if (old_status != status)
864 changed = true; 871 changed = true;
865 } 872 }
@@ -890,6 +897,9 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
890 bool poll = false; 897 bool poll = false;
891 struct drm_connector *connector; 898 struct drm_connector *connector;
892 899
900 if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
901 return;
902
893 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 903 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
894 if (connector->polled) 904 if (connector->polled)
895 poll = true; 905 poll = true;
@@ -919,8 +929,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
919{ 929{
920 if (!dev->mode_config.poll_enabled) 930 if (!dev->mode_config.poll_enabled)
921 return; 931 return;
932
922 /* kill timer and schedule immediate execution, this doesn't block */ 933 /* kill timer and schedule immediate execution, this doesn't block */
923 cancel_delayed_work(&dev->mode_config.output_poll_work); 934 cancel_delayed_work(&dev->mode_config.output_poll_work);
924 queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0); 935 if (drm_kms_helper_poll)
936 queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
925} 937}
926EXPORT_SYMBOL(drm_helper_hpd_irq_event); 938EXPORT_SYMBOL(drm_helper_hpd_irq_event);
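Note: the drm_crtc_helper hunks above introduce a writable bool module parameter that gates all output polling. A compressed sketch of that opt-out switch, with an illustrative worker body:

#include <linux/module.h>
#include <linux/workqueue.h>

static bool my_poll_enabled = true;
module_param_named(poll, my_poll_enabled, bool, 0600);

static void my_poll_execute(struct work_struct *work)
{
	if (!my_poll_enabled)
		return;		/* polling disabled at load time or via sysfs */

	/* ... detect connectors and reschedule the work if still needed ... */
}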
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 90288ec7c284..84da748555bc 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -55,6 +55,9 @@
55static int drm_version(struct drm_device *dev, void *data, 55static int drm_version(struct drm_device *dev, void *data,
56 struct drm_file *file_priv); 56 struct drm_file *file_priv);
57 57
58#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
59 [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
60
58/** Ioctl table */ 61/** Ioctl table */
59static struct drm_ioctl_desc drm_ioctls[] = { 62static struct drm_ioctl_desc drm_ioctls[] = {
60 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), 63 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
@@ -421,6 +424,7 @@ long drm_ioctl(struct file *filp,
421 int retcode = -EINVAL; 424 int retcode = -EINVAL;
422 char stack_kdata[128]; 425 char stack_kdata[128];
423 char *kdata = NULL; 426 char *kdata = NULL;
427 unsigned int usize, asize;
424 428
425 dev = file_priv->minor->dev; 429 dev = file_priv->minor->dev;
426 atomic_inc(&dev->ioctl_count); 430 atomic_inc(&dev->ioctl_count);
@@ -436,11 +440,18 @@ long drm_ioctl(struct file *filp,
436 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) 440 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
437 goto err_i1; 441 goto err_i1;
438 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && 442 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
439 (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) 443 (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
444 u32 drv_size;
440 ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; 445 ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
446 drv_size = _IOC_SIZE(ioctl->cmd_drv);
447 usize = asize = _IOC_SIZE(cmd);
448 if (drv_size > asize)
449 asize = drv_size;
450 }
441 else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { 451 else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
442 ioctl = &drm_ioctls[nr]; 452 ioctl = &drm_ioctls[nr];
443 cmd = ioctl->cmd; 453 cmd = ioctl->cmd;
454 usize = asize = _IOC_SIZE(cmd);
444 } else 455 } else
445 goto err_i1; 456 goto err_i1;
446 457
@@ -460,10 +471,10 @@ long drm_ioctl(struct file *filp,
460 retcode = -EACCES; 471 retcode = -EACCES;
461 } else { 472 } else {
462 if (cmd & (IOC_IN | IOC_OUT)) { 473 if (cmd & (IOC_IN | IOC_OUT)) {
463 if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) { 474 if (asize <= sizeof(stack_kdata)) {
464 kdata = stack_kdata; 475 kdata = stack_kdata;
465 } else { 476 } else {
466 kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); 477 kdata = kmalloc(asize, GFP_KERNEL);
467 if (!kdata) { 478 if (!kdata) {
468 retcode = -ENOMEM; 479 retcode = -ENOMEM;
469 goto err_i1; 480 goto err_i1;
@@ -473,11 +484,13 @@ long drm_ioctl(struct file *filp,
473 484
474 if (cmd & IOC_IN) { 485 if (cmd & IOC_IN) {
475 if (copy_from_user(kdata, (void __user *)arg, 486 if (copy_from_user(kdata, (void __user *)arg,
476 _IOC_SIZE(cmd)) != 0) { 487 usize) != 0) {
477 retcode = -EFAULT; 488 retcode = -EFAULT;
478 goto err_i1; 489 goto err_i1;
479 } 490 }
480 } 491 } else
492 memset(kdata, 0, usize);
493
481 if (ioctl->flags & DRM_UNLOCKED) 494 if (ioctl->flags & DRM_UNLOCKED)
482 retcode = func(dev, kdata, file_priv); 495 retcode = func(dev, kdata, file_priv);
483 else { 496 else {
@@ -488,7 +501,7 @@ long drm_ioctl(struct file *filp,
488 501
489 if (cmd & IOC_OUT) { 502 if (cmd & IOC_OUT) {
490 if (copy_to_user((void __user *)arg, kdata, 503 if (copy_to_user((void __user *)arg, kdata,
491 _IOC_SIZE(cmd)) != 0) 504 usize) != 0)
492 retcode = -EFAULT; 505 retcode = -EFAULT;
493 } 506 }
494 } 507 }
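Note: drm_ioctl() now distinguishes the size encoded in the caller's ioctl number (usize) from the size it allocates (asize, at least the size the driver declared for the command), and clears the buffer for output-only commands so stale kernel memory is never copied back. A compressed sketch of that copy/allocate discipline, with kzalloc standing in for the separate memset (names and the surrounding function are illustrative):

#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static long my_ioctl_copy(unsigned int cmd, u32 drv_size, void __user *arg)
{
	unsigned int usize = _IOC_SIZE(cmd);		/* what userspace passed */
	unsigned int asize = max(usize, drv_size);	/* what the kernel may touch */
	char *kdata;
	long ret = 0;

	kdata = kzalloc(asize, GFP_KERNEL);	/* zeroed: covers output-only ioctls */
	if (!kdata)
		return -ENOMEM;

	if ((cmd & IOC_IN) && copy_from_user(kdata, arg, usize)) {
		ret = -EFAULT;
		goto out;
	}

	/* ... dispatch to the handler with kdata here ... */

	if ((cmd & IOC_OUT) && copy_to_user(arg, kdata, usize))
		ret = -EFAULT;
out:
	kfree(kdata);
	return ret;
}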
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index de82e201d682..6a5e403f9aa1 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -94,10 +94,11 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn
94 int i; 94 int i;
95 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; 95 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
96 struct drm_fb_helper_cmdline_mode *cmdline_mode; 96 struct drm_fb_helper_cmdline_mode *cmdline_mode;
97 struct drm_connector *connector = fb_helper_conn->connector; 97 struct drm_connector *connector;
98 98
99 if (!fb_helper_conn) 99 if (!fb_helper_conn)
100 return false; 100 return false;
101 connector = fb_helper_conn->connector;
101 102
102 cmdline_mode = &fb_helper_conn->cmdline_mode; 103 cmdline_mode = &fb_helper_conn->cmdline_mode;
103 if (!mode_option) 104 if (!mode_option)
@@ -369,7 +370,7 @@ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
369} 370}
370static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); 371static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
371 372
372static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3) 373static void drm_fb_helper_sysrq(int dummy1)
373{ 374{
374 schedule_work(&drm_fb_helper_restore_work); 375 schedule_work(&drm_fb_helper_restore_work);
375} 376}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a652a65546f..b744dad5c237 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -41,6 +41,7 @@
41 41
42/* from BKL pushdown: note that nothing else serializes idr_find() */ 42/* from BKL pushdown: note that nothing else serializes idr_find() */
43DEFINE_MUTEX(drm_global_mutex); 43DEFINE_MUTEX(drm_global_mutex);
44EXPORT_SYMBOL(drm_global_mutex);
44 45
45static int drm_open_helper(struct inode *inode, struct file *filp, 46static int drm_open_helper(struct inode *inode, struct file *filp,
46 struct drm_device * dev); 47 struct drm_device * dev);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index bf92d07510df..5663d2719063 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -148,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev,
148 return -ENOMEM; 148 return -ENOMEM;
149 149
150 kref_init(&obj->refcount); 150 kref_init(&obj->refcount);
151 kref_init(&obj->handlecount); 151 atomic_set(&obj->handle_count, 0);
152 obj->size = size; 152 obj->size = size;
153 153
154 atomic_inc(&dev->object_count); 154 atomic_inc(&dev->object_count);
@@ -462,28 +462,6 @@ drm_gem_object_free(struct kref *kref)
462} 462}
463EXPORT_SYMBOL(drm_gem_object_free); 463EXPORT_SYMBOL(drm_gem_object_free);
464 464
465/**
466 * Called after the last reference to the object has been lost.
467 * Must be called without holding struct_mutex
468 *
469 * Frees the object
470 */
471void
472drm_gem_object_free_unlocked(struct kref *kref)
473{
474 struct drm_gem_object *obj = (struct drm_gem_object *) kref;
475 struct drm_device *dev = obj->dev;
476
477 if (dev->driver->gem_free_object_unlocked != NULL)
478 dev->driver->gem_free_object_unlocked(obj);
479 else if (dev->driver->gem_free_object != NULL) {
480 mutex_lock(&dev->struct_mutex);
481 dev->driver->gem_free_object(obj);
482 mutex_unlock(&dev->struct_mutex);
483 }
484}
485EXPORT_SYMBOL(drm_gem_object_free_unlocked);
486
487static void drm_gem_object_ref_bug(struct kref *list_kref) 465static void drm_gem_object_ref_bug(struct kref *list_kref)
488{ 466{
489 BUG(); 467 BUG();
@@ -496,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref)
496 * called before drm_gem_object_free or we'll be touching 474 * called before drm_gem_object_free or we'll be touching
497 * freed memory 475 * freed memory
498 */ 476 */
499void 477void drm_gem_object_handle_free(struct drm_gem_object *obj)
500drm_gem_object_handle_free(struct kref *kref)
501{ 478{
502 struct drm_gem_object *obj = container_of(kref,
503 struct drm_gem_object,
504 handlecount);
505 struct drm_device *dev = obj->dev; 479 struct drm_device *dev = obj->dev;
506 480
507 /* Remove any name for this object */ 481 /* Remove any name for this object */
@@ -528,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
528 struct drm_gem_object *obj = vma->vm_private_data; 502 struct drm_gem_object *obj = vma->vm_private_data;
529 503
530 drm_gem_object_reference(obj); 504 drm_gem_object_reference(obj);
505
506 mutex_lock(&obj->dev->struct_mutex);
507 drm_vm_open_locked(vma);
508 mutex_unlock(&obj->dev->struct_mutex);
531} 509}
532EXPORT_SYMBOL(drm_gem_vm_open); 510EXPORT_SYMBOL(drm_gem_vm_open);
533 511
@@ -535,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
535{ 513{
536 struct drm_gem_object *obj = vma->vm_private_data; 514 struct drm_gem_object *obj = vma->vm_private_data;
537 515
538 drm_gem_object_unreference_unlocked(obj); 516 mutex_lock(&obj->dev->struct_mutex);
517 drm_vm_close_locked(vma);
518 drm_gem_object_unreference(obj);
519 mutex_unlock(&obj->dev->struct_mutex);
539} 520}
540EXPORT_SYMBOL(drm_gem_vm_close); 521EXPORT_SYMBOL(drm_gem_vm_close);
541 522
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 2ef2c7827243..974e970ce3f8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data)
255 255
256 seq_printf(m, "%6d %8zd %7d %8d\n", 256 seq_printf(m, "%6d %8zd %7d %8d\n",
257 obj->name, obj->size, 257 obj->name, obj->size,
258 atomic_read(&obj->handlecount.refcount), 258 atomic_read(&obj->handle_count),
259 atomic_read(&obj->refcount.refcount)); 259 atomic_read(&obj->refcount.refcount));
260 return 0; 260 return 0;
261} 261}
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index e2f70a516c34..9bf93bc9a32c 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -92,7 +92,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
92 } 92 }
93 93
94 /* Contention */ 94 /* Contention */
95 mutex_unlock(&drm_global_mutex);
95 schedule(); 96 schedule();
97 mutex_lock(&drm_global_mutex);
96 if (signal_pending(current)) { 98 if (signal_pending(current)) {
97 ret = -EINTR; 99 ret = -EINTR;
98 break; 100 break;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index da99edc50888..a6bfc302ed90 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -285,21 +285,21 @@ void drm_mm_put_block(struct drm_mm_node *cur)
285 285
286EXPORT_SYMBOL(drm_mm_put_block); 286EXPORT_SYMBOL(drm_mm_put_block);
287 287
288static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size, 288static int check_free_hole(unsigned long start, unsigned long end,
289 unsigned alignment) 289 unsigned long size, unsigned alignment)
290{ 290{
291 unsigned wasted = 0; 291 unsigned wasted = 0;
292 292
293 if (entry->size < size) 293 if (end - start < size)
294 return 0; 294 return 0;
295 295
296 if (alignment) { 296 if (alignment) {
297 register unsigned tmp = entry->start % alignment; 297 unsigned tmp = start % alignment;
298 if (tmp) 298 if (tmp)
299 wasted = alignment - tmp; 299 wasted = alignment - tmp;
300 } 300 }
301 301
302 if (entry->size >= size + wasted) { 302 if (end >= start + size + wasted) {
303 return 1; 303 return 1;
304 } 304 }
305 305
@@ -320,7 +320,8 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
320 best_size = ~0UL; 320 best_size = ~0UL;
321 321
322 list_for_each_entry(entry, &mm->free_stack, free_stack) { 322 list_for_each_entry(entry, &mm->free_stack, free_stack) {
323 if (!check_free_mm_node(entry, size, alignment)) 323 if (!check_free_hole(entry->start, entry->start + entry->size,
324 size, alignment))
324 continue; 325 continue;
325 326
326 if (!best_match) 327 if (!best_match)
@@ -353,10 +354,12 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
353 best_size = ~0UL; 354 best_size = ~0UL;
354 355
355 list_for_each_entry(entry, &mm->free_stack, free_stack) { 356 list_for_each_entry(entry, &mm->free_stack, free_stack) {
356 if (entry->start > end || (entry->start+entry->size) < start) 357 unsigned long adj_start = entry->start < start ?
357 continue; 358 start : entry->start;
359 unsigned long adj_end = entry->start + entry->size > end ?
360 end : entry->start + entry->size;
358 361
359 if (!check_free_mm_node(entry, size, alignment)) 362 if (!check_free_hole(adj_start, adj_end, size, alignment))
360 continue; 363 continue;
361 364
362 if (!best_match) 365 if (!best_match)
@@ -449,7 +452,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
449 node->free_stack.prev = prev_free; 452 node->free_stack.prev = prev_free;
450 node->free_stack.next = next_free; 453 node->free_stack.next = next_free;
451 454
452 if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) { 455 if (check_free_hole(node->start, node->start + node->size,
456 mm->scan_size, mm->scan_alignment)) {
453 mm->scan_hit_start = node->start; 457 mm->scan_hit_start = node->start;
454 mm->scan_hit_size = node->size; 458 mm->scan_hit_size = node->size;
455 459
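Note: check_free_hole() now takes an explicit [start, end) interval instead of a node, so a range-restricted search only has to clamp each free block to the allowed window before testing it. A self-contained sketch of the test plus the caller-side clamping (variable names follow the hunk):

#include <linux/kernel.h>

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned rem = start % alignment;
		if (rem)
			wasted = alignment - rem;
	}

	return end >= start + size + wasted;
}

static int hole_fits_in_range(unsigned long blk_start, unsigned long blk_size,
			      unsigned long range_start, unsigned long range_end,
			      unsigned long size, unsigned alignment)
{
	/* clamp the free block to the allowed window, then reuse the plain test */
	unsigned long adj_start = max(blk_start, range_start);
	unsigned long adj_end = min(blk_start + blk_size, range_end);

	return check_free_hole(adj_start, adj_end, size, alignment);
}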
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index f1f473ea97d3..949326d2a8e5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -251,7 +251,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
251 drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK; 251 drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
252 /* Fill in HSync values */ 252 /* Fill in HSync values */
253 drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2; 253 drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
254 drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC; 254 drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
255 /* Fill in VSync values */
256 drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
257 drm_mode->vsync_end = drm_mode->vsync_start + vsync;
255 } 258 }
256 /* 15/13. Find pixel clock frequency (kHz for xf86) */ 259 /* 15/13. Find pixel clock frequency (kHz for xf86) */
257 drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod; 260 drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
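Note: the reduced-blanking branch previously assigned hsync_start the sync width itself ("= CVT_RB_H_SYNC") instead of subtracting it from hsync_end, and never filled the vertical sync values. A worked sketch of the corrected arithmetic; the constant values below are assumptions for illustration (the driver's real CVT_RB_* defines may differ), only the structure mirrors the fix:

#define RB_H_BLANK	160	/* assumed */
#define RB_H_SYNC	32	/* assumed */
#define RB_V_FPORCH	3	/* assumed */

struct rb_sync {
	int hsync_start, hsync_end;
	int vsync_start, vsync_end;
};

static struct rb_sync rb_fill_sync(int hdisplay, int vdisplay, int vsync)
{
	struct rb_sync s;

	s.hsync_end = hdisplay + RB_H_BLANK / 2;	/* e.g. 1920 -> 2000 */
	s.hsync_start = s.hsync_end - RB_H_SYNC;	/* 1968; the old code set 32 */
	s.vsync_start = vdisplay + RB_V_FPORCH;
	s.vsync_end = s.vsync_start + vsync;
	return s;
}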
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e20f78b542a7..f5bd9e590c80 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -164,6 +164,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
164 dev->hose = pdev->sysdata; 164 dev->hose = pdev->sysdata;
165#endif 165#endif
166 166
167 mutex_lock(&drm_global_mutex);
168
167 if ((ret = drm_fill_in_dev(dev, ent, driver))) { 169 if ((ret = drm_fill_in_dev(dev, ent, driver))) {
168 printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); 170 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
169 goto err_g2; 171 goto err_g2;
@@ -199,6 +201,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
199 driver->name, driver->major, driver->minor, driver->patchlevel, 201 driver->name, driver->major, driver->minor, driver->patchlevel,
200 driver->date, pci_name(pdev), dev->primary->index); 202 driver->date, pci_name(pdev), dev->primary->index);
201 203
204 mutex_unlock(&drm_global_mutex);
202 return 0; 205 return 0;
203 206
204err_g4: 207err_g4:
@@ -210,6 +213,7 @@ err_g2:
210 pci_disable_device(pdev); 213 pci_disable_device(pdev);
211err_g1: 214err_g1:
212 kfree(dev); 215 kfree(dev);
216 mutex_unlock(&drm_global_mutex);
213 return ret; 217 return ret;
214} 218}
215EXPORT_SYMBOL(drm_get_pci_dev); 219EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 460e9a3afa8d..92d1d0fb7b75 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -53,6 +53,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
53 dev->platformdev = platdev; 53 dev->platformdev = platdev;
54 dev->dev = &platdev->dev; 54 dev->dev = &platdev->dev;
55 55
56 mutex_lock(&drm_global_mutex);
57
56 ret = drm_fill_in_dev(dev, NULL, driver); 58 ret = drm_fill_in_dev(dev, NULL, driver);
57 59
58 if (ret) { 60 if (ret) {
@@ -87,6 +89,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
87 89
88 list_add_tail(&dev->driver_item, &driver->device_list); 90 list_add_tail(&dev->driver_item, &driver->device_list);
89 91
92 mutex_unlock(&drm_global_mutex);
93
90 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 94 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
91 driver->name, driver->major, driver->minor, driver->patchlevel, 95 driver->name, driver->major, driver->minor, driver->patchlevel,
92 driver->date, dev->primary->index); 96 driver->date, dev->primary->index);
@@ -100,6 +104,7 @@ err_g2:
100 drm_put_minor(&dev->control); 104 drm_put_minor(&dev->control);
101err_g1: 105err_g1:
102 kfree(dev); 106 kfree(dev);
107 mutex_unlock(&drm_global_mutex);
103 return ret; 108 return ret;
104} 109}
105EXPORT_SYMBOL(drm_get_platform_dev); 110EXPORT_SYMBOL(drm_get_platform_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 86118a742231..85da4c40694c 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -159,7 +159,7 @@ static ssize_t status_show(struct device *device,
159 struct drm_connector *connector = to_drm_connector(device); 159 struct drm_connector *connector = to_drm_connector(device);
160 enum drm_connector_status status; 160 enum drm_connector_status status;
161 161
162 status = connector->funcs->detect(connector); 162 status = connector->funcs->detect(connector, true);
163 return snprintf(buf, PAGE_SIZE, "%s\n", 163 return snprintf(buf, PAGE_SIZE, "%s\n",
164 drm_get_connector_status_name(status)); 164 drm_get_connector_status_name(status));
165} 165}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 3778360eceea..5df450683aab 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -138,7 +138,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
138 break; 138 break;
139 } 139 }
140 140
141 if (!agpmem) 141 if (&agpmem->head == &dev->agp->memory)
142 goto vm_fault_error; 142 goto vm_fault_error;
143 143
144 /* 144 /*
@@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
433 mutex_unlock(&dev->struct_mutex); 433 mutex_unlock(&dev->struct_mutex);
434} 434}
435 435
436/** 436void drm_vm_close_locked(struct vm_area_struct *vma)
437 * \c close method for all virtual memory types.
438 *
439 * \param vma virtual memory area.
440 *
441 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
442 * free it.
443 */
444static void drm_vm_close(struct vm_area_struct *vma)
445{ 437{
446 struct drm_file *priv = vma->vm_file->private_data; 438 struct drm_file *priv = vma->vm_file->private_data;
447 struct drm_device *dev = priv->minor->dev; 439 struct drm_device *dev = priv->minor->dev;
@@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
451 vma->vm_start, vma->vm_end - vma->vm_start); 443 vma->vm_start, vma->vm_end - vma->vm_start);
452 atomic_dec(&dev->vma_count); 444 atomic_dec(&dev->vma_count);
453 445
454 mutex_lock(&dev->struct_mutex);
455 list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { 446 list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
456 if (pt->vma == vma) { 447 if (pt->vma == vma) {
457 list_del(&pt->head); 448 list_del(&pt->head);
@@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma)
459 break; 450 break;
460 } 451 }
461 } 452 }
453}
454
455/**
456 * \c close method for all virtual memory types.
457 *
458 * \param vma virtual memory area.
459 *
460 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
461 * free it.
462 */
463static void drm_vm_close(struct vm_area_struct *vma)
464{
465 struct drm_file *priv = vma->vm_file->private_data;
466 struct drm_device *dev = priv->minor->dev;
467
468 mutex_lock(&dev->struct_mutex);
469 drm_vm_close_locked(vma);
462 mutex_unlock(&dev->struct_mutex); 470 mutex_unlock(&dev->struct_mutex);
463} 471}
464 472
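Note: the drm_vm hunks above split the close path into a *_locked() helper so that callers already holding struct_mutex (the GEM vm_close path) can reuse the body, while the original entry point becomes a thin lock-taking wrapper. A minimal sketch of that lock-split pattern with illustrative types:

#include <linux/mutex.h>

struct my_dev {
	struct mutex struct_mutex;
};

static void my_close_locked(struct my_dev *dev)
{
	/* ... unlink and free bookkeeping; struct_mutex is held ... */
}

static void my_close(struct my_dev *dev)
{
	mutex_lock(&dev->struct_mutex);
	my_close_locked(dev);
	mutex_unlock(&dev->struct_mutex);
}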
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 0e6c131313d9..fb07e73581e8 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
116static const struct file_operations i810_buffer_fops = { 116static const struct file_operations i810_buffer_fops = {
117 .open = drm_open, 117 .open = drm_open,
118 .release = drm_release, 118 .release = drm_release,
119 .unlocked_ioctl = drm_ioctl, 119 .unlocked_ioctl = i810_ioctl,
120 .mmap = i810_mmap_buffers, 120 .mmap = i810_mmap_buffers,
121 .fasync = drm_fasync, 121 .fasync = drm_fasync,
122}; 122};
@@ -1255,21 +1255,21 @@ long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1255} 1255}
1256 1256
1257struct drm_ioctl_desc i810_ioctls[] = { 1257struct drm_ioctl_desc i810_ioctls[] = {
1258 DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1258 DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1259 DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), 1259 DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
1260 DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED), 1260 DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
1261 DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), 1261 DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
1262 DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED), 1262 DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
1263 DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED), 1263 DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
1264 DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED), 1264 DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
1265 DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED), 1265 DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
1266 DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED), 1266 DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
1267 DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED), 1267 DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
1268 DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED), 1268 DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
1269 DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED), 1269 DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
1270 DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1270 DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1271 DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED), 1271 DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
1272 DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED), 1272 DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
1273}; 1273};
1274 1274
1275int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); 1275int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 5168862c9227..cc92c7e6236f 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
118static const struct file_operations i830_buffer_fops = { 118static const struct file_operations i830_buffer_fops = {
119 .open = drm_open, 119 .open = drm_open,
120 .release = drm_release, 120 .release = drm_release,
121 .unlocked_ioctl = drm_ioctl, 121 .unlocked_ioctl = i830_ioctl,
122 .mmap = i830_mmap_buffers, 122 .mmap = i830_mmap_buffers,
123 .fasync = drm_fasync, 123 .fasync = drm_fasync,
124}; 124};
@@ -1524,20 +1524,20 @@ long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1524} 1524}
1525 1525
1526struct drm_ioctl_desc i830_ioctls[] = { 1526struct drm_ioctl_desc i830_ioctls[] = {
1527 DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1527 DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1528 DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED), 1528 DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
1529 DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED), 1529 DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
1530 DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), 1530 DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
1531 DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED), 1531 DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
1532 DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED), 1532 DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
1533 DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED), 1533 DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
1534 DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED), 1534 DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
1535 DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED), 1535 DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
1536 DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED), 1536 DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
1537 DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED), 1537 DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
1538 DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED), 1538 DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
1539 DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED), 1539 DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
1540 DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED), 1540 DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
1541}; 1541};
1542 1542
1543int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); 1543int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 92d5605a34d1..5e43d7076789 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -31,6 +31,7 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include "drmP.h" 32#include "drmP.h"
33#include "drm.h" 33#include "drm.h"
34#include "intel_drv.h"
34#include "i915_drm.h" 35#include "i915_drm.h"
35#include "i915_drv.h" 36#include "i915_drv.h"
36 37
@@ -121,6 +122,54 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
121 return 0; 122 return 0;
122} 123}
123 124
125static int i915_gem_pageflip_info(struct seq_file *m, void *data)
126{
127 struct drm_info_node *node = (struct drm_info_node *) m->private;
128 struct drm_device *dev = node->minor->dev;
129 unsigned long flags;
130 struct intel_crtc *crtc;
131
132 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
133 const char *pipe = crtc->pipe ? "B" : "A";
134 const char *plane = crtc->plane ? "B" : "A";
135 struct intel_unpin_work *work;
136
137 spin_lock_irqsave(&dev->event_lock, flags);
138 work = crtc->unpin_work;
139 if (work == NULL) {
140 seq_printf(m, "No flip due on pipe %s (plane %s)\n",
141 pipe, plane);
142 } else {
143 if (!work->pending) {
144 seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
145 pipe, plane);
146 } else {
147 seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
148 pipe, plane);
149 }
150 if (work->enable_stall_check)
151 seq_printf(m, "Stall check enabled, ");
152 else
153 seq_printf(m, "Stall check waiting for page flip ioctl, ");
154 seq_printf(m, "%d prepares\n", work->pending);
155
156 if (work->old_fb_obj) {
157 struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
158 if(obj_priv)
159 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
160 }
161 if (work->pending_flip_obj) {
162 struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
163 if(obj_priv)
164 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
165 }
166 }
167 spin_unlock_irqrestore(&dev->event_lock, flags);
168 }
169
170 return 0;
171}
172
124static int i915_gem_request_info(struct seq_file *m, void *data) 173static int i915_gem_request_info(struct seq_file *m, void *data)
125{ 174{
126 struct drm_info_node *node = (struct drm_info_node *) m->private; 175 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -777,6 +826,7 @@ static struct drm_info_list i915_debugfs_list[] = {
777 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 826 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
778 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 827 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
779 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 828 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
829 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
780 {"i915_gem_request", i915_gem_request_info, 0}, 830 {"i915_gem_request", i915_gem_request_info, 0},
781 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 831 {"i915_gem_seqno", i915_gem_seqno_info, 0},
782 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 832 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 44af317731b6..2dd2c93ebfa3 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -620,8 +620,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
620 ret = copy_from_user(cliprects, batch->cliprects, 620 ret = copy_from_user(cliprects, batch->cliprects,
621 batch->num_cliprects * 621 batch->num_cliprects *
622 sizeof(struct drm_clip_rect)); 622 sizeof(struct drm_clip_rect));
623 if (ret != 0) 623 if (ret != 0) {
624 ret = -EFAULT;
624 goto fail_free; 625 goto fail_free;
626 }
625 } 627 }
626 628
627 mutex_lock(&dev->struct_mutex); 629 mutex_lock(&dev->struct_mutex);
@@ -662,8 +664,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
662 return -ENOMEM; 664 return -ENOMEM;
663 665
664 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); 666 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
665 if (ret != 0) 667 if (ret != 0) {
668 ret = -EFAULT;
666 goto fail_batch_free; 669 goto fail_batch_free;
670 }
667 671
668 if (cmdbuf->num_cliprects) { 672 if (cmdbuf->num_cliprects) {
669 cliprects = kcalloc(cmdbuf->num_cliprects, 673 cliprects = kcalloc(cmdbuf->num_cliprects,
@@ -676,8 +680,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
676 ret = copy_from_user(cliprects, cmdbuf->cliprects, 680 ret = copy_from_user(cliprects, cmdbuf->cliprects,
677 cmdbuf->num_cliprects * 681 cmdbuf->num_cliprects *
678 sizeof(struct drm_clip_rect)); 682 sizeof(struct drm_clip_rect));
679 if (ret != 0) 683 if (ret != 0) {
684 ret = -EFAULT;
680 goto fail_clip_free; 685 goto fail_clip_free;
686 }
681 } 687 }
682 688
683 mutex_lock(&dev->struct_mutex); 689 mutex_lock(&dev->struct_mutex);
@@ -885,7 +891,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
885 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; 891 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
886 u32 temp_lo, temp_hi = 0; 892 u32 temp_lo, temp_hi = 0;
887 u64 mchbar_addr; 893 u64 mchbar_addr;
888 int ret = 0; 894 int ret;
889 895
890 if (IS_I965G(dev)) 896 if (IS_I965G(dev))
891 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); 897 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
@@ -895,22 +901,23 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
895 /* If ACPI doesn't have it, assume we need to allocate it ourselves */ 901 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
896#ifdef CONFIG_PNP 902#ifdef CONFIG_PNP
897 if (mchbar_addr && 903 if (mchbar_addr &&
898 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { 904 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
899 ret = 0; 905 return 0;
900 goto out;
901 }
902#endif 906#endif
903 907
904 /* Get some space for it */ 908 /* Get some space for it */
905 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, 909 dev_priv->mch_res.name = "i915 MCHBAR";
910 dev_priv->mch_res.flags = IORESOURCE_MEM;
911 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
912 &dev_priv->mch_res,
906 MCHBAR_SIZE, MCHBAR_SIZE, 913 MCHBAR_SIZE, MCHBAR_SIZE,
907 PCIBIOS_MIN_MEM, 914 PCIBIOS_MIN_MEM,
908 0, pcibios_align_resource, 915 0, pcibios_align_resource,
909 dev_priv->bridge_dev); 916 dev_priv->bridge_dev);
910 if (ret) { 917 if (ret) {
911 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); 918 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
912 dev_priv->mch_res.start = 0; 919 dev_priv->mch_res.start = 0;
913 goto out; 920 return ret;
914 } 921 }
915 922
916 if (IS_I965G(dev)) 923 if (IS_I965G(dev))
@@ -919,8 +926,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
919 926
920 pci_write_config_dword(dev_priv->bridge_dev, reg, 927 pci_write_config_dword(dev_priv->bridge_dev, reg,
921 lower_32_bits(dev_priv->mch_res.start)); 928 lower_32_bits(dev_priv->mch_res.start));
922out: 929 return 0;
923 return ret;
924} 930}
925 931
926/* Setup MCHBAR if possible, return true if we should disable it again */ 932/* Setup MCHBAR if possible, return true if we should disable it again */
@@ -1781,9 +1787,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1781 } 1787 }
1782 } 1788 }
1783 1789
1784 div_u64(diff, diff1); 1790 diff = div_u64(diff, diff1);
1785 ret = ((m * diff) + c); 1791 ret = ((m * diff) + c);
1786 div_u64(ret, 10); 1792 ret = div_u64(ret, 10);
1787 1793
1788 dev_priv->last_count1 = total_count; 1794 dev_priv->last_count1 = total_count;
1789 dev_priv->last_time1 = now; 1795 dev_priv->last_time1 = now;
@@ -1852,7 +1858,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
1852 1858
1853 /* More magic constants... */ 1859 /* More magic constants... */
1854 diff = diff * 1181; 1860 diff = diff * 1181;
1855 div_u64(diff, diffms * 10); 1861 diff = div_u64(diff, diffms * 10);
1856 dev_priv->gfx_power = diff; 1862 dev_priv->gfx_power = diff;
1857} 1863}
1858 1864
@@ -2082,6 +2088,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2082 goto free_priv; 2088 goto free_priv;
2083 } 2089 }
2084 2090
2091 /* overlay on gen2 is broken and can't address above 1G */
2092 if (IS_GEN2(dev))
2093 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
2094
2085 dev_priv->regs = ioremap(base, size); 2095 dev_priv->regs = ioremap(base, size);
2086 if (!dev_priv->regs) { 2096 if (!dev_priv->regs) {
2087 DRM_ERROR("failed to map registers\n"); 2097 DRM_ERROR("failed to map registers\n");
@@ -2221,6 +2231,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2221 dev_priv->mchdev_lock = &mchdev_lock; 2231 dev_priv->mchdev_lock = &mchdev_lock;
2222 spin_unlock(&mchdev_lock); 2232 spin_unlock(&mchdev_lock);
2223 2233
2234 /* XXX Prevent module unload due to memory corruption bugs. */
2235 __module_get(THIS_MODULE);
2236
2224 return 0; 2237 return 0;
2225 2238
2226out_workqueue_free: 2239out_workqueue_free:
@@ -2367,46 +2380,46 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
2367} 2380}
2368 2381
2369struct drm_ioctl_desc i915_ioctls[] = { 2382struct drm_ioctl_desc i915_ioctls[] = {
2370 DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2383 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2371 DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), 2384 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
2372 DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH), 2385 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
2373 DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), 2386 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
2374 DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), 2387 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
2375 DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), 2388 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
2376 DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH), 2389 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
2377 DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2390 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2378 DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH), 2391 DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
2379 DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH), 2392 DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
2380 DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2393 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2381 DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), 2394 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
2382 DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), 2395 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2383 DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), 2396 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2384 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), 2397 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
2385 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 2398 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
2386 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2399 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2387 DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2400 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2388 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), 2401 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
2389 DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), 2402 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
2390 DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 2403 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
2391 DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 2404 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
2392 DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), 2405 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
2393 DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), 2406 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
2394 DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2407 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2395 DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 2408 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2396 DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), 2409 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
2397 DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), 2410 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
2398 DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), 2411 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
2399 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), 2412 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
2400 DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), 2413 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
2401 DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), 2414 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
2402 DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), 2415 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
2403 DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), 2416 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
2404 DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), 2417 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
2405 DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), 2418 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
2406 DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), 2419 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
2407 DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), 2420 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
2408 DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2421 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2409 DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 2422 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2410}; 2423};
2411 2424
2412int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 2425int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
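Note: the div_u64() hunks in i915_dma.c above fix a silent no-op: div_u64() returns the quotient rather than dividing in place, so the result has to be assigned back. A minimal sketch (the 1181/10 scaling mirrors the gfx_power hunk; the function name is illustrative and a non-zero divisor is assumed):

#include <linux/math64.h>

static u64 scaled_power(u64 diff, unsigned long diffms)
{
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);	/* assign the result back */
	return diff;
}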
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 00befce8fbb7..6dbe14cc4f74 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -61,91 +61,86 @@ extern int intel_agp_enabled;
61 .driver_data = (unsigned long) info } 61 .driver_data = (unsigned long) info }
62 62
63static const struct intel_device_info intel_i830_info = { 63static const struct intel_device_info intel_i830_info = {
64 .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, 64 .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
65}; 65};
66 66
67static const struct intel_device_info intel_845g_info = { 67static const struct intel_device_info intel_845g_info = {
68 .is_i8xx = 1, 68 .gen = 2, .is_i8xx = 1,
69}; 69};
70 70
71static const struct intel_device_info intel_i85x_info = { 71static const struct intel_device_info intel_i85x_info = {
72 .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, 72 .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
73 .cursor_needs_physical = 1, 73 .cursor_needs_physical = 1,
74}; 74};
75 75
76static const struct intel_device_info intel_i865g_info = { 76static const struct intel_device_info intel_i865g_info = {
77 .is_i8xx = 1, 77 .gen = 2, .is_i8xx = 1,
78}; 78};
79 79
80static const struct intel_device_info intel_i915g_info = { 80static const struct intel_device_info intel_i915g_info = {
81 .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, 81 .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
82}; 82};
83static const struct intel_device_info intel_i915gm_info = { 83static const struct intel_device_info intel_i915gm_info = {
84 .is_i9xx = 1, .is_mobile = 1, 84 .gen = 3, .is_i9xx = 1, .is_mobile = 1,
85 .cursor_needs_physical = 1, 85 .cursor_needs_physical = 1,
86}; 86};
87static const struct intel_device_info intel_i945g_info = { 87static const struct intel_device_info intel_i945g_info = {
88 .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, 88 .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
89}; 89};
90static const struct intel_device_info intel_i945gm_info = { 90static const struct intel_device_info intel_i945gm_info = {
91 .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, 91 .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
92 .has_hotplug = 1, .cursor_needs_physical = 1, 92 .has_hotplug = 1, .cursor_needs_physical = 1,
93}; 93};
94 94
95static const struct intel_device_info intel_i965g_info = { 95static const struct intel_device_info intel_i965g_info = {
96 .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, 96 .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
97 .has_hotplug = 1,
97}; 98};
98 99
99static const struct intel_device_info intel_i965gm_info = { 100static const struct intel_device_info intel_i965gm_info = {
100 .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, 101 .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
101 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, 102 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
102 .has_hotplug = 1,
103}; 103};
104 104
105static const struct intel_device_info intel_g33_info = { 105static const struct intel_device_info intel_g33_info = {
106 .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, 106 .gen = 3, .is_g33 = 1, .is_i9xx = 1,
107 .has_hotplug = 1, 107 .need_gfx_hws = 1, .has_hotplug = 1,
108}; 108};
109 109
110static const struct intel_device_info intel_g45_info = { 110static const struct intel_device_info intel_g45_info = {
111 .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, 111 .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
112 .has_pipe_cxsr = 1, 112 .has_pipe_cxsr = 1, .has_hotplug = 1,
113 .has_hotplug = 1,
114}; 113};
115 114
116static const struct intel_device_info intel_gm45_info = { 115static const struct intel_device_info intel_gm45_info = {
117 .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, 116 .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
118 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, 117 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
119 .has_pipe_cxsr = 1, 118 .has_pipe_cxsr = 1, .has_hotplug = 1,
120 .has_hotplug = 1,
121}; 119};
122 120
123static const struct intel_device_info intel_pineview_info = { 121static const struct intel_device_info intel_pineview_info = {
124 .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, 122 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
125 .need_gfx_hws = 1, 123 .need_gfx_hws = 1, .has_hotplug = 1,
126 .has_hotplug = 1,
127}; 124};
128 125
129static const struct intel_device_info intel_ironlake_d_info = { 126static const struct intel_device_info intel_ironlake_d_info = {
130 .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, 127 .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
131 .has_pipe_cxsr = 1, 128 .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
132 .has_hotplug = 1,
133}; 129};
134 130
135static const struct intel_device_info intel_ironlake_m_info = { 131static const struct intel_device_info intel_ironlake_m_info = {
136 .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, 132 .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
137 .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, 133 .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
138 .has_hotplug = 1,
139}; 134};
140 135
141static const struct intel_device_info intel_sandybridge_d_info = { 136static const struct intel_device_info intel_sandybridge_d_info = {
142 .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, 137 .gen = 6, .is_i965g = 1, .is_i9xx = 1,
143 .has_hotplug = 1, .is_gen6 = 1, 138 .need_gfx_hws = 1, .has_hotplug = 1,
144}; 139};
145 140
146static const struct intel_device_info intel_sandybridge_m_info = { 141static const struct intel_device_info intel_sandybridge_m_info = {
147 .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, 142 .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
148 .has_hotplug = 1, .is_gen6 = 1, 143 .need_gfx_hws = 1, .has_hotplug = 1,
149}; 144};
150 145
151static const struct pci_device_id pciidlist[] = { /* aka */ 146static const struct pci_device_id pciidlist[] = { /* aka */
@@ -175,13 +170,18 @@ static const struct pci_device_id pciidlist[] = { /* aka */
175 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ 170 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
176 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ 171 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
177 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ 172 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
173 INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
178 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), 174 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
179 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), 175 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
180 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), 176 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
181 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), 177 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
182 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), 178 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
179 INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
180 INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
183 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), 181 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
182 INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
184 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), 183 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
184 INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
185 {0, 0, 0} 185 {0, 0, 0}
186}; 186};
187 187
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 047cd7ce7e1b..af4a263cf257 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -191,6 +191,7 @@ struct drm_i915_display_funcs {
191}; 191};
192 192
193struct intel_device_info { 193struct intel_device_info {
194 u8 gen;
194 u8 is_mobile : 1; 195 u8 is_mobile : 1;
195 u8 is_i8xx : 1; 196 u8 is_i8xx : 1;
196 u8 is_i85x : 1; 197 u8 is_i85x : 1;
@@ -206,7 +207,6 @@ struct intel_device_info {
206 u8 is_broadwater : 1; 207 u8 is_broadwater : 1;
207 u8 is_crestline : 1; 208 u8 is_crestline : 1;
208 u8 is_ironlake : 1; 209 u8 is_ironlake : 1;
209 u8 is_gen6 : 1;
210 u8 has_fbc : 1; 210 u8 has_fbc : 1;
211 u8 has_rc6 : 1; 211 u8 has_rc6 : 1;
212 u8 has_pipe_cxsr : 1; 212 u8 has_pipe_cxsr : 1;
@@ -1162,7 +1162,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
1162#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1162#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1163#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 1163#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1164#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1164#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1165#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
1166#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 1165#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1167#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1166#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1168#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1167#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
@@ -1181,27 +1180,13 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
1181#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1180#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1182#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) 1181#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
1183#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) 1182#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
1184#define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6)
1185#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1183#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1186 1184
1187#define IS_GEN3(dev) (IS_I915G(dev) || \ 1185#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
1188 IS_I915GM(dev) || \ 1186#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
1189 IS_I945G(dev) || \ 1187#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
1190 IS_I945GM(dev) || \ 1188#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1191 IS_G33(dev) || \ 1189#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
1192 IS_PINEVIEW(dev))
1193#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
1194 (dev)->pci_device == 0x2982 || \
1195 (dev)->pci_device == 0x2992 || \
1196 (dev)->pci_device == 0x29A2 || \
1197 (dev)->pci_device == 0x2A02 || \
1198 (dev)->pci_device == 0x2A12 || \
1199 (dev)->pci_device == 0x2E02 || \
1200 (dev)->pci_device == 0x2E12 || \
1201 (dev)->pci_device == 0x2E22 || \
1202 (dev)->pci_device == 0x2E32 || \
1203 (dev)->pci_device == 0x2A42 || \
1204 (dev)->pci_device == 0x2E42)
1205 1190
1206#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) 1191#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
1207#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1192#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
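Note: the i915_drv.h hunks above replace the is_gen6 bit and the long per-PCI-ID IS_GEN3/IS_GEN4 lists with a single numeric gen field in intel_device_info, so every generation check becomes one comparison. A small illustration of the before/after shape; the _OLD/_NEW names are editorial, only INTEL_INFO() and .gen come from the header above:

  /* Before: generation membership spelled out per PCI ID. */
  #define IS_GEN4_OLD(dev)	((dev)->pci_device == 0x2972 || \
  				 (dev)->pci_device == 0x2982 /* ...ten more IDs... */)
  /* After: one byte in intel_device_info answers the same question. */
  #define IS_GEN4_NEW(dev)	(INTEL_INFO(dev)->gen == 4)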
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index df5a7135c261..90b1d6753b9d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/swap.h> 35#include <linux/swap.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/intel-gtt.h>
37 38
38static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); 39static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
39static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 40static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
@@ -135,12 +136,13 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
135 return -ENOMEM; 136 return -ENOMEM;
136 137
137 ret = drm_gem_handle_create(file_priv, obj, &handle); 138 ret = drm_gem_handle_create(file_priv, obj, &handle);
139 /* drop reference from allocate - handle holds it now */
138 drm_gem_object_unreference_unlocked(obj); 140 drm_gem_object_unreference_unlocked(obj);
139 if (ret) 141 if (ret) {
140 return ret; 142 return ret;
143 }
141 144
142 args->handle = handle; 145 args->handle = handle;
143
144 return 0; 146 return 0;
145} 147}
146 148
@@ -467,14 +469,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
467 return -ENOENT; 469 return -ENOENT;
468 obj_priv = to_intel_bo(obj); 470 obj_priv = to_intel_bo(obj);
469 471
470 /* Bounds check source. 472 /* Bounds check source. */
471 * 473 if (args->offset > obj->size || args->size > obj->size - args->offset) {
472 * XXX: This could use review for overflow issues... 474 ret = -EINVAL;
473 */ 475 goto err;
474 if (args->offset > obj->size || args->size > obj->size || 476 }
475 args->offset + args->size > obj->size) { 477
476 drm_gem_object_unreference_unlocked(obj); 478 if (!access_ok(VERIFY_WRITE,
477 return -EINVAL; 479 (char __user *)(uintptr_t)args->data_ptr,
480 args->size)) {
481 ret = -EFAULT;
482 goto err;
478 } 483 }
479 484
480 if (i915_gem_object_needs_bit17_swizzle(obj)) { 485 if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -486,8 +491,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
486 file_priv); 491 file_priv);
487 } 492 }
488 493
494err:
489 drm_gem_object_unreference_unlocked(obj); 495 drm_gem_object_unreference_unlocked(obj);
490
491 return ret; 496 return ret;
492} 497}
493 498
@@ -576,8 +581,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
576 581
577 user_data = (char __user *) (uintptr_t) args->data_ptr; 582 user_data = (char __user *) (uintptr_t) args->data_ptr;
578 remain = args->size; 583 remain = args->size;
579 if (!access_ok(VERIFY_READ, user_data, remain))
580 return -EFAULT;
581 584
582 585
583 mutex_lock(&dev->struct_mutex); 586 mutex_lock(&dev->struct_mutex);
@@ -930,14 +933,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
930 return -ENOENT; 933 return -ENOENT;
931 obj_priv = to_intel_bo(obj); 934 obj_priv = to_intel_bo(obj);
932 935
933 /* Bounds check destination. 936 /* Bounds check destination. */
934 * 937 if (args->offset > obj->size || args->size > obj->size - args->offset) {
935 * XXX: This could use review for overflow issues... 938 ret = -EINVAL;
936 */ 939 goto err;
937 if (args->offset > obj->size || args->size > obj->size || 940 }
938 args->offset + args->size > obj->size) { 941
939 drm_gem_object_unreference_unlocked(obj); 942 if (!access_ok(VERIFY_READ,
940 return -EINVAL; 943 (char __user *)(uintptr_t)args->data_ptr,
944 args->size)) {
945 ret = -EFAULT;
946 goto err;
941 } 947 }
942 948
943 /* We can only do the GTT pwrite on untiled buffers, as otherwise 949 /* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -971,8 +977,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
971 DRM_INFO("pwrite failed %d\n", ret); 977 DRM_INFO("pwrite failed %d\n", ret);
972#endif 978#endif
973 979
980err:
974 drm_gem_object_unreference_unlocked(obj); 981 drm_gem_object_unreference_unlocked(obj);
975
976 return ret; 982 return ret;
977} 983}
978 984
@@ -2347,14 +2353,21 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2347 2353
2348 reg->obj = obj; 2354 reg->obj = obj;
2349 2355
2350 if (IS_GEN6(dev)) 2356 switch (INTEL_INFO(dev)->gen) {
2357 case 6:
2351 sandybridge_write_fence_reg(reg); 2358 sandybridge_write_fence_reg(reg);
2352 else if (IS_I965G(dev)) 2359 break;
2360 case 5:
2361 case 4:
2353 i965_write_fence_reg(reg); 2362 i965_write_fence_reg(reg);
2354 else if (IS_I9XX(dev)) 2363 break;
2364 case 3:
2355 i915_write_fence_reg(reg); 2365 i915_write_fence_reg(reg);
2356 else 2366 break;
2367 case 2:
2357 i830_write_fence_reg(reg); 2368 i830_write_fence_reg(reg);
2369 break;
2370 }
2358 2371
2359 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, 2372 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2360 obj_priv->tiling_mode); 2373 obj_priv->tiling_mode);
@@ -2377,22 +2390,26 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2377 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2390 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2378 struct drm_i915_fence_reg *reg = 2391 struct drm_i915_fence_reg *reg =
2379 &dev_priv->fence_regs[obj_priv->fence_reg]; 2392 &dev_priv->fence_regs[obj_priv->fence_reg];
2393 uint32_t fence_reg;
2380 2394
2381 if (IS_GEN6(dev)) { 2395 switch (INTEL_INFO(dev)->gen) {
2396 case 6:
2382 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + 2397 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2383 (obj_priv->fence_reg * 8), 0); 2398 (obj_priv->fence_reg * 8), 0);
2384 } else if (IS_I965G(dev)) { 2399 break;
2400 case 5:
2401 case 4:
2385 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 2402 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2386 } else { 2403 break;
2387 uint32_t fence_reg; 2404 case 3:
2388 2405 if (obj_priv->fence_reg >= 8)
2389 if (obj_priv->fence_reg < 8) 2406 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
2390 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2391 else 2407 else
2392 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 2408 case 2:
2393 8) * 4; 2409 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2394 2410
2395 I915_WRITE(fence_reg, 0); 2411 I915_WRITE(fence_reg, 0);
2412 break;
2396 } 2413 }
2397 2414
2398 reg->obj = NULL; 2415 reg->obj = NULL;
@@ -3243,6 +3260,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3243 (int) reloc->offset, 3260 (int) reloc->offset,
3244 reloc->read_domains, 3261 reloc->read_domains,
3245 reloc->write_domain); 3262 reloc->write_domain);
3263 drm_gem_object_unreference(target_obj);
3264 i915_gem_object_unpin(obj);
3246 return -EINVAL; 3265 return -EINVAL;
3247 } 3266 }
3248 if (reloc->write_domain & I915_GEM_DOMAIN_CPU || 3267 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
@@ -3585,6 +3604,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3585 if (ret != 0) { 3604 if (ret != 0) {
3586 DRM_ERROR("copy %d cliprects failed: %d\n", 3605 DRM_ERROR("copy %d cliprects failed: %d\n",
3587 args->num_cliprects, ret); 3606 args->num_cliprects, ret);
3607 ret = -EFAULT;
3588 goto pre_mutex_err; 3608 goto pre_mutex_err;
3589 } 3609 }
3590 } 3610 }
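Note: the pread/pwrite hunks above drop the "args->offset + args->size > obj->size" comparison, which can wrap for large user-supplied values, in favour of a subtraction-based test, and move the access_ok() check up front. A minimal standalone sketch of the overflow-safe form (plain C, outside the driver):

  #include <stdbool.h>
  #include <stdint.h>

  /* Forming offset + size first can overflow; comparing each term
   * against the object size cannot. */
  static bool args_in_bounds(uint64_t offset, uint64_t size, uint64_t obj_size)
  {
  	if (offset > obj_size)
  		return false;
  	/* obj_size - offset cannot underflow here, offset <= obj_size */
  	return size <= obj_size - offset;
  }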
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 72cae3cccad8..5c428fa3e0b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -79,6 +79,7 @@ mark_free(struct drm_i915_gem_object *obj_priv,
79 struct list_head *unwind) 79 struct list_head *unwind)
80{ 80{
81 list_add(&obj_priv->evict_list, unwind); 81 list_add(&obj_priv->evict_list, unwind);
82 drm_gem_object_reference(&obj_priv->base);
82 return drm_mm_scan_add_block(obj_priv->gtt_space); 83 return drm_mm_scan_add_block(obj_priv->gtt_space);
83} 84}
84 85
@@ -92,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
92{ 93{
93 drm_i915_private_t *dev_priv = dev->dev_private; 94 drm_i915_private_t *dev_priv = dev->dev_private;
94 struct list_head eviction_list, unwind_list; 95 struct list_head eviction_list, unwind_list;
95 struct drm_i915_gem_object *obj_priv, *tmp_obj_priv; 96 struct drm_i915_gem_object *obj_priv;
96 struct list_head *render_iter, *bsd_iter; 97 struct list_head *render_iter, *bsd_iter;
97 int ret = 0; 98 int ret = 0;
98 99
@@ -165,6 +166,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
165 list_for_each_entry(obj_priv, &unwind_list, evict_list) { 166 list_for_each_entry(obj_priv, &unwind_list, evict_list) {
166 ret = drm_mm_scan_remove_block(obj_priv->gtt_space); 167 ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
167 BUG_ON(ret); 168 BUG_ON(ret);
169 drm_gem_object_unreference(&obj_priv->base);
168 } 170 }
169 171
170 /* We expect the caller to unpin, evict all and try again, or give up. 172 /* We expect the caller to unpin, evict all and try again, or give up.
@@ -173,36 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
173 return -ENOSPC; 175 return -ENOSPC;
174 176
175found: 177found:
 178 /* drm_mm doesn't allow any other operations while
179 * scanning, therefore store to be evicted objects on a
180 * temporary list. */
176 INIT_LIST_HEAD(&eviction_list); 181 INIT_LIST_HEAD(&eviction_list);
177 list_for_each_entry_safe(obj_priv, tmp_obj_priv, 182 while (!list_empty(&unwind_list)) {
178 &unwind_list, evict_list) { 183 obj_priv = list_first_entry(&unwind_list,
184 struct drm_i915_gem_object,
185 evict_list);
179 if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { 186 if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
 180 /* drm_mm doesn't allow any other operations while
181 * scanning, therefore store to be evicted objects on a
182 * temporary list. */
183 list_move(&obj_priv->evict_list, &eviction_list); 187 list_move(&obj_priv->evict_list, &eviction_list);
188 continue;
184 } 189 }
190 list_del(&obj_priv->evict_list);
191 drm_gem_object_unreference(&obj_priv->base);
185 } 192 }
186 193
187 /* Unbinding will emit any required flushes */ 194 /* Unbinding will emit any required flushes */
188 list_for_each_entry_safe(obj_priv, tmp_obj_priv, 195 while (!list_empty(&eviction_list)) {
189 &eviction_list, evict_list) { 196 obj_priv = list_first_entry(&eviction_list,
190#if WATCH_LRU 197 struct drm_i915_gem_object,
191 DRM_INFO("%s: evicting %p\n", __func__, obj); 198 evict_list);
192#endif 199 if (ret == 0)
193 ret = i915_gem_object_unbind(&obj_priv->base); 200 ret = i915_gem_object_unbind(&obj_priv->base);
194 if (ret) 201 list_del(&obj_priv->evict_list);
195 return ret; 202 drm_gem_object_unreference(&obj_priv->base);
196 } 203 }
197 204
198 /* The just created free hole should be on the top of the free stack 205 return ret;
199 * maintained by drm_mm, so this BUG_ON actually executes in O(1).
200 * Furthermore all accessed data has just recently been used, so it
201 * should be really fast, too. */
202 BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
203 alignment, 0));
204
205 return 0;
206} 206}
207 207
208int 208int
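Note: in the i915_gem_evict.c hunk above, mark_free() now takes a reference on every object placed on the unwind list, and each path that removes an object from the unwind or eviction list drops that reference, so nothing can vanish while drm_mm is still scanning; the list_for_each_entry_safe loops become explicit while/list_first_entry drains because unbinding may touch the lists. A generic sketch of that drain pattern, with my_obj, unbind(), get_ref()/put_ref() as placeholders for the driver's own types:

  static int drain_eviction_list(struct list_head *eviction_list)
  {
  	struct my_obj *obj;
  	int ret = 0;

  	while (!list_empty(eviction_list)) {
  		obj = list_first_entry(eviction_list, struct my_obj, evict_list);
  		if (ret == 0)
  			ret = unbind(obj);	/* keep draining even after a failure */
  		list_del(&obj->evict_list);
  		put_ref(obj);			/* pairs with the get_ref() taken at mark_free() */
  	}
  	return ret;
  }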
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 16861b800fee..744225ebb4b2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -887,6 +887,49 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
887 queue_work(dev_priv->wq, &dev_priv->error_work); 887 queue_work(dev_priv->wq, &dev_priv->error_work);
888} 888}
889 889
890static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
891{
892 drm_i915_private_t *dev_priv = dev->dev_private;
893 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
894 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
895 struct drm_i915_gem_object *obj_priv;
896 struct intel_unpin_work *work;
897 unsigned long flags;
898 bool stall_detected;
899
900 /* Ignore early vblank irqs */
901 if (intel_crtc == NULL)
902 return;
903
904 spin_lock_irqsave(&dev->event_lock, flags);
905 work = intel_crtc->unpin_work;
906
907 if (work == NULL || work->pending || !work->enable_stall_check) {
908 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
909 spin_unlock_irqrestore(&dev->event_lock, flags);
910 return;
911 }
912
913 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
914 obj_priv = to_intel_bo(work->pending_flip_obj);
915 if(IS_I965G(dev)) {
916 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
917 stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
918 } else {
919 int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
920 stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
921 crtc->y * crtc->fb->pitch +
922 crtc->x * crtc->fb->bits_per_pixel/8);
923 }
924
925 spin_unlock_irqrestore(&dev->event_lock, flags);
926
927 if (stall_detected) {
928 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
929 intel_prepare_page_flip(dev, intel_crtc->plane);
930 }
931}
932
890irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 933irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
891{ 934{
892 struct drm_device *dev = (struct drm_device *) arg; 935 struct drm_device *dev = (struct drm_device *) arg;
@@ -1004,15 +1047,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1004 if (pipea_stats & vblank_status) { 1047 if (pipea_stats & vblank_status) {
1005 vblank++; 1048 vblank++;
1006 drm_handle_vblank(dev, 0); 1049 drm_handle_vblank(dev, 0);
1007 if (!dev_priv->flip_pending_is_done) 1050 if (!dev_priv->flip_pending_is_done) {
1051 i915_pageflip_stall_check(dev, 0);
1008 intel_finish_page_flip(dev, 0); 1052 intel_finish_page_flip(dev, 0);
1053 }
1009 } 1054 }
1010 1055
1011 if (pipeb_stats & vblank_status) { 1056 if (pipeb_stats & vblank_status) {
1012 vblank++; 1057 vblank++;
1013 drm_handle_vblank(dev, 1); 1058 drm_handle_vblank(dev, 1);
1014 if (!dev_priv->flip_pending_is_done) 1059 if (!dev_priv->flip_pending_is_done) {
1060 i915_pageflip_stall_check(dev, 1);
1015 intel_finish_page_flip(dev, 1); 1061 intel_finish_page_flip(dev, 1);
1062 }
1016 } 1063 }
1017 1064
1018 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || 1065 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
@@ -1303,17 +1350,25 @@ void i915_hangcheck_elapsed(unsigned long data)
1303 i915_seqno_passed(i915_get_gem_seqno(dev, 1350 i915_seqno_passed(i915_get_gem_seqno(dev,
1304 &dev_priv->render_ring), 1351 &dev_priv->render_ring),
1305 i915_get_tail_request(dev)->seqno)) { 1352 i915_get_tail_request(dev)->seqno)) {
1353 bool missed_wakeup = false;
1354
1306 dev_priv->hangcheck_count = 0; 1355 dev_priv->hangcheck_count = 0;
1307 1356
1308 /* Issue a wake-up to catch stuck h/w. */ 1357 /* Issue a wake-up to catch stuck h/w. */
1309 if (dev_priv->render_ring.waiting_gem_seqno | 1358 if (dev_priv->render_ring.waiting_gem_seqno &&
1310 dev_priv->bsd_ring.waiting_gem_seqno) { 1359 waitqueue_active(&dev_priv->render_ring.irq_queue)) {
1311 DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); 1360 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
1312 if (dev_priv->render_ring.waiting_gem_seqno) 1361 missed_wakeup = true;
1313 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
1314 if (dev_priv->bsd_ring.waiting_gem_seqno)
1315 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
1316 } 1362 }
1363
1364 if (dev_priv->bsd_ring.waiting_gem_seqno &&
1365 waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
1366 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
1367 missed_wakeup = true;
1368 }
1369
1370 if (missed_wakeup)
1371 DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
1317 return; 1372 return;
1318 } 1373 }
1319 1374
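Note: the hangcheck hunk above stops OR-ing the two rings' waiting_gem_seqno fields together and instead tests each ring separately, only waking and reporting a missed IRQ when a waiter is actually parked on that ring's queue. A sketch of the per-ring test; "struct ring" and its fields are stand-ins for the driver's ring structure, waitqueue_active() and wake_up_all() are the standard linux/wait.h helpers:

  static bool kick_ring_if_stuck(struct ring *ring)
  {
  	if (ring->waiting_gem_seqno && waitqueue_active(&ring->irq_queue)) {
  		wake_up_all(&ring->irq_queue);
  		return true;	/* caller prints the "missed IRQ" error once */
  	}
  	return false;
  }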
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 67e3ec1a6af9..4f5e15577e89 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -319,6 +319,7 @@
319 319
320#define MI_MODE 0x0209c 320#define MI_MODE 0x0209c
321# define VS_TIMER_DISPATCH (1 << 6) 321# define VS_TIMER_DISPATCH (1 << 6)
322# define MI_FLUSH_ENABLE (1 << 11)
322 323
323#define SCPD0 0x0209c /* 915+ only */ 324#define SCPD0 0x0209c /* 915+ only */
324#define IER 0x020a0 325#define IER 0x020a0
@@ -2205,9 +2206,17 @@
2205#define WM1_LP_SR_EN (1<<31) 2206#define WM1_LP_SR_EN (1<<31)
2206#define WM1_LP_LATENCY_SHIFT 24 2207#define WM1_LP_LATENCY_SHIFT 24
2207#define WM1_LP_LATENCY_MASK (0x7f<<24) 2208#define WM1_LP_LATENCY_MASK (0x7f<<24)
2209#define WM1_LP_FBC_LP1_MASK (0xf<<20)
2210#define WM1_LP_FBC_LP1_SHIFT 20
2208#define WM1_LP_SR_MASK (0x1ff<<8) 2211#define WM1_LP_SR_MASK (0x1ff<<8)
2209#define WM1_LP_SR_SHIFT 8 2212#define WM1_LP_SR_SHIFT 8
2210#define WM1_LP_CURSOR_MASK (0x3f) 2213#define WM1_LP_CURSOR_MASK (0x3f)
2214#define WM2_LP_ILK 0x4510c
2215#define WM2_LP_EN (1<<31)
2216#define WM3_LP_ILK 0x45110
2217#define WM3_LP_EN (1<<31)
2218#define WM1S_LP_ILK 0x45120
2219#define WM1S_LP_EN (1<<31)
2211 2220
2212/* Memory latency timer register */ 2221/* Memory latency timer register */
2213#define MLTR_ILK 0x11222 2222#define MLTR_ILK 0x11222
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2c6b98f2440e..31f08581e93a 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -789,16 +789,25 @@ int i915_save_state(struct drm_device *dev)
789 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); 789 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
790 790
791 /* Fences */ 791 /* Fences */
792 if (IS_I965G(dev)) { 792 switch (INTEL_INFO(dev)->gen) {
793 case 6:
794 for (i = 0; i < 16; i++)
795 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
796 break;
797 case 5:
798 case 4:
793 for (i = 0; i < 16; i++) 799 for (i = 0; i < 16; i++)
794 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 800 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
795 } else { 801 break;
796 for (i = 0; i < 8; i++) 802 case 3:
797 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
798
799 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 803 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
800 for (i = 0; i < 8; i++) 804 for (i = 0; i < 8; i++)
801 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 805 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
806 case 2:
807 for (i = 0; i < 8; i++)
808 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
809 break;
810
802 } 811 }
803 812
804 return 0; 813 return 0;
@@ -815,15 +824,24 @@ int i915_restore_state(struct drm_device *dev)
815 I915_WRITE(HWS_PGA, dev_priv->saveHWS); 824 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
816 825
817 /* Fences */ 826 /* Fences */
818 if (IS_I965G(dev)) { 827 switch (INTEL_INFO(dev)->gen) {
828 case 6:
829 for (i = 0; i < 16; i++)
830 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
831 break;
832 case 5:
833 case 4:
819 for (i = 0; i < 16; i++) 834 for (i = 0; i < 16; i++)
820 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); 835 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
821 } else { 836 break;
822 for (i = 0; i < 8; i++) 837 case 3:
823 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); 838 case 2:
824 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 839 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
825 for (i = 0; i < 8; i++) 840 for (i = 0; i < 8; i++)
826 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); 841 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
842 for (i = 0; i < 8; i++)
843 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
844 break;
827 } 845 }
828 846
829 i915_restore_display(dev); 847 i915_restore_display(dev);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4b7735196cd5..197d4f32585a 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -188,7 +188,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
188 188
189 if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 189 if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
190 1000, 1)) 190 1000, 1))
191 DRM_ERROR("timed out waiting for FORCE_TRIGGER"); 191 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
192 192
193 if (turn_off_dac) { 193 if (turn_off_dac) {
194 I915_WRITE(PCH_ADPA, temp); 194 I915_WRITE(PCH_ADPA, temp);
@@ -245,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
245 if (wait_for((I915_READ(PORT_HOTPLUG_EN) & 245 if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
246 CRT_HOTPLUG_FORCE_DETECT) == 0, 246 CRT_HOTPLUG_FORCE_DETECT) == 0,
247 1000, 1)) 247 1000, 1))
248 DRM_ERROR("timed out waiting for FORCE_DETECT to go off"); 248 DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
249 } 249 }
250 250
251 stat = I915_READ(PORT_HOTPLUG_STAT); 251 stat = I915_READ(PORT_HOTPLUG_STAT);
@@ -400,7 +400,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
400 return status; 400 return status;
401} 401}
402 402
403static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) 403static enum drm_connector_status
404intel_crt_detect(struct drm_connector *connector, bool force)
404{ 405{
405 struct drm_device *dev = connector->dev; 406 struct drm_device *dev = connector->dev;
406 struct drm_encoder *encoder = intel_attached_encoder(connector); 407 struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -419,6 +420,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
419 if (intel_crt_detect_ddc(encoder)) 420 if (intel_crt_detect_ddc(encoder))
420 return connector_status_connected; 421 return connector_status_connected;
421 422
423 if (!force)
424 return connector->status;
425
422 /* for pre-945g platforms use load detect */ 426 /* for pre-945g platforms use load detect */
423 if (encoder->crtc && encoder->crtc->enabled) { 427 if (encoder->crtc && encoder->crtc->enabled) {
424 status = intel_crt_load_detect(encoder->crtc, intel_encoder); 428 status = intel_crt_load_detect(encoder->crtc, intel_encoder);
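Note: intel_crt_detect() gains a force argument above; when the caller does not allow intrusive probing, the function returns the cached connector status instead of falling through to load detection, which would borrow a live CRTC. A minimal sketch of those semantics, with probe_ddc() and load_detect() as placeholder helpers, not driver functions:

  static enum drm_connector_status
  crt_detect_sketch(struct drm_connector *connector, bool force)
  {
  	if (probe_ddc(connector))
  		return connector_status_connected;
  	if (!force)
  		return connector->status;	/* keep the last known state */
  	return load_detect(connector);		/* intrusive: may steal a CRTC */
  }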
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 23157e1de3be..979228594599 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -990,15 +990,31 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
990 struct drm_i915_private *dev_priv = dev->dev_private; 990 struct drm_i915_private *dev_priv = dev->dev_private;
991 int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT); 991 int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
992 992
993 /* Clear existing vblank status. Note this will clear any other
994 * sticky status fields as well.
995 *
996 * This races with i915_driver_irq_handler() with the result
997 * that either function could miss a vblank event. Here it is not
998 * fatal, as we will either wait upon the next vblank interrupt or
999 * timeout. Generally speaking intel_wait_for_vblank() is only
1000 * called during modeset at which time the GPU should be idle and
1001 * should *not* be performing page flips and thus not waiting on
1002 * vblanks...
1003 * Currently, the result of us stealing a vblank from the irq
1004 * handler is that a single frame will be skipped during swapbuffers.
1005 */
1006 I915_WRITE(pipestat_reg,
1007 I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
1008
993 /* Wait for vblank interrupt bit to set */ 1009 /* Wait for vblank interrupt bit to set */
994 if (wait_for((I915_READ(pipestat_reg) & 1010 if (wait_for((I915_READ(pipestat_reg) &
995 PIPE_VBLANK_INTERRUPT_STATUS) == 0, 1011 PIPE_VBLANK_INTERRUPT_STATUS),
996 50, 0)) 1012 50, 0))
997 DRM_DEBUG_KMS("vblank wait timed out\n"); 1013 DRM_DEBUG_KMS("vblank wait timed out\n");
998} 1014}
999 1015
1000/** 1016/*
1001 * intel_wait_for_vblank_off - wait for vblank after disabling a pipe 1017 * intel_wait_for_pipe_off - wait for pipe to turn off
1002 * @dev: drm device 1018 * @dev: drm device
1003 * @pipe: pipe to wait for 1019 * @pipe: pipe to wait for
1004 * 1020 *
@@ -1006,25 +1022,39 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
1006 * spinning on the vblank interrupt status bit, since we won't actually 1022 * spinning on the vblank interrupt status bit, since we won't actually
1007 * see an interrupt when the pipe is disabled. 1023 * see an interrupt when the pipe is disabled.
1008 * 1024 *
1009 * So this function waits for the display line value to settle (it 1025 * On Gen4 and above:
1010 * usually ends up stopping at the start of the next frame). 1026 * wait for the pipe register state bit to turn off
1027 *
1028 * Otherwise:
1029 * wait for the display line value to settle (it usually
1030 * ends up stopping at the start of the next frame).
1031 *
1011 */ 1032 */
1012void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) 1033static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1013{ 1034{
1014 struct drm_i915_private *dev_priv = dev->dev_private; 1035 struct drm_i915_private *dev_priv = dev->dev_private;
1015 int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); 1036
1016 unsigned long timeout = jiffies + msecs_to_jiffies(100); 1037 if (INTEL_INFO(dev)->gen >= 4) {
1017 u32 last_line; 1038 int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
1018 1039
1019 /* Wait for the display line to settle */ 1040 /* Wait for the Pipe State to go off */
1020 do { 1041 if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
1021 last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; 1042 100, 0))
1022 mdelay(5); 1043 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1023 } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && 1044 } else {
1024 time_after(timeout, jiffies)); 1045 u32 last_line;
1025 1046 int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
1026 if (time_after(jiffies, timeout)) 1047 unsigned long timeout = jiffies + msecs_to_jiffies(100);
1027 DRM_DEBUG_KMS("vblank wait timed out\n"); 1048
1049 /* Wait for the display line to settle */
1050 do {
1051 last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
1052 mdelay(5);
1053 } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
1054 time_after(timeout, jiffies));
1055 if (time_after(jiffies, timeout))
1056 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1057 }
1028} 1058}
1029 1059
1030/* Parameters have changed, update FBC info */ 1060/* Parameters have changed, update FBC info */
@@ -1486,7 +1516,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1486 dspcntr &= ~DISPPLANE_TILED; 1516 dspcntr &= ~DISPPLANE_TILED;
1487 } 1517 }
1488 1518
1489 if (IS_IRONLAKE(dev)) 1519 if (HAS_PCH_SPLIT(dev))
1490 /* must disable */ 1520 /* must disable */
1491 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 1521 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1492 1522
@@ -1495,20 +1525,19 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1495 Start = obj_priv->gtt_offset; 1525 Start = obj_priv->gtt_offset;
1496 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); 1526 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
1497 1527
1498 DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); 1528 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1529 Start, Offset, x, y, fb->pitch);
1499 I915_WRITE(dspstride, fb->pitch); 1530 I915_WRITE(dspstride, fb->pitch);
1500 if (IS_I965G(dev)) { 1531 if (IS_I965G(dev)) {
1501 I915_WRITE(dspbase, Offset);
1502 I915_READ(dspbase);
1503 I915_WRITE(dspsurf, Start); 1532 I915_WRITE(dspsurf, Start);
1504 I915_READ(dspsurf);
1505 I915_WRITE(dsptileoff, (y << 16) | x); 1533 I915_WRITE(dsptileoff, (y << 16) | x);
1534 I915_WRITE(dspbase, Offset);
1506 } else { 1535 } else {
1507 I915_WRITE(dspbase, Start + Offset); 1536 I915_WRITE(dspbase, Start + Offset);
1508 I915_READ(dspbase);
1509 } 1537 }
1538 POSTING_READ(dspbase);
1510 1539
1511 if ((IS_I965G(dev) || plane == 0)) 1540 if (IS_I965G(dev) || plane == 0)
1512 intel_update_fbc(crtc, &crtc->mode); 1541 intel_update_fbc(crtc, &crtc->mode);
1513 1542
1514 intel_wait_for_vblank(dev, intel_crtc->pipe); 1543 intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -1522,7 +1551,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1522 struct drm_framebuffer *old_fb) 1551 struct drm_framebuffer *old_fb)
1523{ 1552{
1524 struct drm_device *dev = crtc->dev; 1553 struct drm_device *dev = crtc->dev;
1525 struct drm_i915_private *dev_priv = dev->dev_private;
1526 struct drm_i915_master_private *master_priv; 1554 struct drm_i915_master_private *master_priv;
1527 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1555 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1528 struct intel_framebuffer *intel_fb; 1556 struct intel_framebuffer *intel_fb;
@@ -1530,13 +1558,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1530 struct drm_gem_object *obj; 1558 struct drm_gem_object *obj;
1531 int pipe = intel_crtc->pipe; 1559 int pipe = intel_crtc->pipe;
1532 int plane = intel_crtc->plane; 1560 int plane = intel_crtc->plane;
1533 unsigned long Start, Offset;
1534 int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
1535 int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
1536 int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
1537 int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
1538 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
1539 u32 dspcntr;
1540 int ret; 1561 int ret;
1541 1562
1542 /* no fb bound */ 1563 /* no fb bound */
@@ -1572,71 +1593,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1572 return ret; 1593 return ret;
1573 } 1594 }
1574 1595
1575 dspcntr = I915_READ(dspcntr_reg); 1596 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
1576 /* Mask out pixel format bits in case we change it */ 1597 if (ret) {
1577 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1578 switch (crtc->fb->bits_per_pixel) {
1579 case 8:
1580 dspcntr |= DISPPLANE_8BPP;
1581 break;
1582 case 16:
1583 if (crtc->fb->depth == 15)
1584 dspcntr |= DISPPLANE_15_16BPP;
1585 else
1586 dspcntr |= DISPPLANE_16BPP;
1587 break;
1588 case 24:
1589 case 32:
1590 if (crtc->fb->depth == 30)
1591 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
1592 else
1593 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1594 break;
1595 default:
1596 DRM_ERROR("Unknown color depth\n");
1597 i915_gem_object_unpin(obj); 1598 i915_gem_object_unpin(obj);
1598 mutex_unlock(&dev->struct_mutex); 1599 mutex_unlock(&dev->struct_mutex);
1599 return -EINVAL; 1600 return ret;
1600 }
1601 if (IS_I965G(dev)) {
1602 if (obj_priv->tiling_mode != I915_TILING_NONE)
1603 dspcntr |= DISPPLANE_TILED;
1604 else
1605 dspcntr &= ~DISPPLANE_TILED;
1606 }
1607
1608 if (HAS_PCH_SPLIT(dev))
1609 /* must disable */
1610 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1611
1612 I915_WRITE(dspcntr_reg, dspcntr);
1613
1614 Start = obj_priv->gtt_offset;
1615 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
1616
1617 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1618 Start, Offset, x, y, crtc->fb->pitch);
1619 I915_WRITE(dspstride, crtc->fb->pitch);
1620 if (IS_I965G(dev)) {
1621 I915_WRITE(dspsurf, Start);
1622 I915_WRITE(dsptileoff, (y << 16) | x);
1623 I915_WRITE(dspbase, Offset);
1624 } else {
1625 I915_WRITE(dspbase, Start + Offset);
1626 } 1601 }
1627 POSTING_READ(dspbase);
1628
1629 if ((IS_I965G(dev) || plane == 0))
1630 intel_update_fbc(crtc, &crtc->mode);
1631
1632 intel_wait_for_vblank(dev, pipe);
1633 1602
1634 if (old_fb) { 1603 if (old_fb) {
1635 intel_fb = to_intel_framebuffer(old_fb); 1604 intel_fb = to_intel_framebuffer(old_fb);
1636 obj_priv = to_intel_bo(intel_fb->obj); 1605 obj_priv = to_intel_bo(intel_fb->obj);
1637 i915_gem_object_unpin(intel_fb->obj); 1606 i915_gem_object_unpin(intel_fb->obj);
1638 } 1607 }
1639 intel_increase_pllclock(crtc, true);
1640 1608
1641 mutex_unlock(&dev->struct_mutex); 1609 mutex_unlock(&dev->struct_mutex);
1642 1610
@@ -1911,9 +1879,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1911 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; 1879 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
1912 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; 1880 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1913 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; 1881 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
1914 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
1915 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
1916 int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
1917 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; 1882 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
1918 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; 1883 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
1919 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; 1884 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1982,15 +1947,19 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1982 } 1947 }
1983 1948
1984 /* Enable panel fitting for LVDS */ 1949 /* Enable panel fitting for LVDS */
1985 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) 1950 if (dev_priv->pch_pf_size &&
1986 || HAS_eDP || intel_pch_has_edp(crtc)) { 1951 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
1987 if (dev_priv->pch_pf_size) { 1952 || HAS_eDP || intel_pch_has_edp(crtc))) {
1988 temp = I915_READ(pf_ctl_reg); 1953 /* Force use of hard-coded filter coefficients
1989 I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); 1954 * as some pre-programmed values are broken,
1990 I915_WRITE(pf_win_pos, dev_priv->pch_pf_pos); 1955 * e.g. x201.
1991 I915_WRITE(pf_win_size, dev_priv->pch_pf_size); 1956 */
1992 } else 1957 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
1993 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); 1958 PF_ENABLE | PF_FILTER_MED_3x3);
1959 I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
1960 dev_priv->pch_pf_pos);
1961 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
1962 dev_priv->pch_pf_size);
1994 } 1963 }
1995 1964
1996 /* Enable CPU pipe */ 1965 /* Enable CPU pipe */
@@ -2115,7 +2084,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2115 I915_WRITE(transconf_reg, temp | TRANS_ENABLE); 2084 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
2116 I915_READ(transconf_reg); 2085 I915_READ(transconf_reg);
2117 2086
2118 if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 10, 0)) 2087 if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
2119 DRM_ERROR("failed to enable transcoder\n"); 2088 DRM_ERROR("failed to enable transcoder\n");
2120 } 2089 }
2121 2090
@@ -2155,14 +2124,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2155 udelay(100); 2124 udelay(100);
2156 2125
2157 /* Disable PF */ 2126 /* Disable PF */
2158 temp = I915_READ(pf_ctl_reg); 2127 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
2159 if ((temp & PF_ENABLE) != 0) { 2128 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
2160 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
2161 I915_READ(pf_ctl_reg);
2162 }
2163 I915_WRITE(pf_win_size, 0);
2164 POSTING_READ(pf_win_size);
2165
2166 2129
2167 /* disable CPU FDI tx and PCH FDI rx */ 2130 /* disable CPU FDI tx and PCH FDI rx */
2168 temp = I915_READ(fdi_tx_reg); 2131 temp = I915_READ(fdi_tx_reg);
@@ -2379,13 +2342,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
2379 I915_READ(dspbase_reg); 2342 I915_READ(dspbase_reg);
2380 } 2343 }
2381 2344
2382 /* Wait for vblank for the disable to take effect */
2383 intel_wait_for_vblank_off(dev, pipe);
2384
2385 /* Don't disable pipe A or pipe A PLLs if needed */ 2345 /* Don't disable pipe A or pipe A PLLs if needed */
2386 if (pipeconf_reg == PIPEACONF && 2346 if (pipeconf_reg == PIPEACONF &&
2387 (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 2347 (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
2348 /* Wait for vblank for the disable to take effect */
2349 intel_wait_for_vblank(dev, pipe);
2388 goto skip_pipe_off; 2350 goto skip_pipe_off;
2351 }
2389 2352
2390 /* Next, disable display pipes */ 2353 /* Next, disable display pipes */
2391 temp = I915_READ(pipeconf_reg); 2354 temp = I915_READ(pipeconf_reg);
@@ -2394,8 +2357,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
2394 I915_READ(pipeconf_reg); 2357 I915_READ(pipeconf_reg);
2395 } 2358 }
2396 2359
2397 /* Wait for vblank for the disable to take effect. */ 2360 /* Wait for the pipe to turn off */
2398 intel_wait_for_vblank_off(dev, pipe); 2361 intel_wait_for_pipe_off(dev, pipe);
2399 2362
2400 temp = I915_READ(dpll_reg); 2363 temp = I915_READ(dpll_reg);
2401 if ((temp & DPLL_VCO_ENABLE) != 0) { 2364 if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -2421,6 +2384,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
2421 int pipe = intel_crtc->pipe; 2384 int pipe = intel_crtc->pipe;
2422 bool enabled; 2385 bool enabled;
2423 2386
2387 if (intel_crtc->dpms_mode == mode)
2388 return;
2389
2424 intel_crtc->dpms_mode = mode; 2390 intel_crtc->dpms_mode = mode;
2425 intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON; 2391 intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
2426 2392
@@ -2511,11 +2477,19 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2511 struct drm_display_mode *adjusted_mode) 2477 struct drm_display_mode *adjusted_mode)
2512{ 2478{
2513 struct drm_device *dev = crtc->dev; 2479 struct drm_device *dev = crtc->dev;
2480
2514 if (HAS_PCH_SPLIT(dev)) { 2481 if (HAS_PCH_SPLIT(dev)) {
2515 /* FDI link clock is fixed at 2.7G */ 2482 /* FDI link clock is fixed at 2.7G */
2516 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) 2483 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
2517 return false; 2484 return false;
2518 } 2485 }
2486
2487 /* XXX some encoders set the crtcinfo, others don't.
2488 * Obviously we need some form of conflict resolution here...
2489 */
2490 if (adjusted_mode->crtc_htotal == 0)
2491 drm_mode_set_crtcinfo(adjusted_mode, 0);
2492
2519 return true; 2493 return true;
2520} 2494}
2521 2495
@@ -2815,14 +2789,8 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2815 /* Don't promote wm_size to unsigned... */ 2789 /* Don't promote wm_size to unsigned... */
2816 if (wm_size > (long)wm->max_wm) 2790 if (wm_size > (long)wm->max_wm)
2817 wm_size = wm->max_wm; 2791 wm_size = wm->max_wm;
2818 if (wm_size <= 0) { 2792 if (wm_size <= 0)
2819 wm_size = wm->default_wm; 2793 wm_size = wm->default_wm;
2820 DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
2821 " entries required = %ld, available = %lu.\n",
2822 entries_required + wm->guard_size,
2823 wm->fifo_size);
2824 }
2825
2826 return wm_size; 2794 return wm_size;
2827} 2795}
2828 2796
@@ -3436,8 +3404,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3436 reg_value = I915_READ(WM1_LP_ILK); 3404 reg_value = I915_READ(WM1_LP_ILK);
3437 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | 3405 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
3438 WM1_LP_CURSOR_MASK); 3406 WM1_LP_CURSOR_MASK);
3439 reg_value |= WM1_LP_SR_EN | 3407 reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3440 (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3441 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; 3408 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
3442 3409
3443 I915_WRITE(WM1_LP_ILK, reg_value); 3410 I915_WRITE(WM1_LP_ILK, reg_value);
@@ -3554,10 +3521,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3554 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; 3521 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
3555 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; 3522 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
3556 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 3523 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
3557 bool is_edp = false; 3524 struct intel_encoder *has_edp_encoder = NULL;
3558 struct drm_mode_config *mode_config = &dev->mode_config; 3525 struct drm_mode_config *mode_config = &dev->mode_config;
3559 struct drm_encoder *encoder; 3526 struct drm_encoder *encoder;
3560 struct intel_encoder *intel_encoder = NULL;
3561 const intel_limit_t *limit; 3527 const intel_limit_t *limit;
3562 int ret; 3528 int ret;
3563 struct fdi_m_n m_n = {0}; 3529 struct fdi_m_n m_n = {0};
@@ -3578,12 +3544,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3578 drm_vblank_pre_modeset(dev, pipe); 3544 drm_vblank_pre_modeset(dev, pipe);
3579 3545
3580 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 3546 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
3547 struct intel_encoder *intel_encoder;
3581 3548
3582 if (!encoder || encoder->crtc != crtc) 3549 if (encoder->crtc != crtc)
3583 continue; 3550 continue;
3584 3551
3585 intel_encoder = enc_to_intel_encoder(encoder); 3552 intel_encoder = enc_to_intel_encoder(encoder);
3586
3587 switch (intel_encoder->type) { 3553 switch (intel_encoder->type) {
3588 case INTEL_OUTPUT_LVDS: 3554 case INTEL_OUTPUT_LVDS:
3589 is_lvds = true; 3555 is_lvds = true;
@@ -3607,7 +3573,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3607 is_dp = true; 3573 is_dp = true;
3608 break; 3574 break;
3609 case INTEL_OUTPUT_EDP: 3575 case INTEL_OUTPUT_EDP:
3610 is_edp = true; 3576 has_edp_encoder = intel_encoder;
3611 break; 3577 break;
3612 } 3578 }
3613 3579
@@ -3685,10 +3651,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3685 int lane = 0, link_bw, bpp; 3651 int lane = 0, link_bw, bpp;
3686 /* eDP doesn't require FDI link, so just set DP M/N 3652 /* eDP doesn't require FDI link, so just set DP M/N
3687 according to current link config */ 3653 according to current link config */
3688 if (is_edp) { 3654 if (has_edp_encoder) {
3689 target_clock = mode->clock; 3655 target_clock = mode->clock;
3690 intel_edp_link_config(intel_encoder, 3656 intel_edp_link_config(has_edp_encoder,
3691 &lane, &link_bw); 3657 &lane, &link_bw);
3692 } else { 3658 } else {
3693 /* DP over FDI requires target mode clock 3659 /* DP over FDI requires target mode clock
3694 instead of link clock */ 3660 instead of link clock */
@@ -3709,7 +3675,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3709 temp |= PIPE_8BPC; 3675 temp |= PIPE_8BPC;
3710 else 3676 else
3711 temp |= PIPE_6BPC; 3677 temp |= PIPE_6BPC;
3712 } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) { 3678 } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
3713 switch (dev_priv->edp_bpp/3) { 3679 switch (dev_priv->edp_bpp/3) {
3714 case 8: 3680 case 8:
3715 temp |= PIPE_8BPC; 3681 temp |= PIPE_8BPC;
@@ -3782,7 +3748,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3782 3748
3783 udelay(200); 3749 udelay(200);
3784 3750
3785 if (is_edp) { 3751 if (has_edp_encoder) {
3786 if (dev_priv->lvds_use_ssc) { 3752 if (dev_priv->lvds_use_ssc) {
3787 temp |= DREF_SSC1_ENABLE; 3753 temp |= DREF_SSC1_ENABLE;
3788 I915_WRITE(PCH_DREF_CONTROL, temp); 3754 I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -3931,7 +3897,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3931 dpll_reg = pch_dpll_reg; 3897 dpll_reg = pch_dpll_reg;
3932 } 3898 }
3933 3899
3934 if (!is_edp) { 3900 if (!has_edp_encoder) {
3935 I915_WRITE(fp_reg, fp); 3901 I915_WRITE(fp_reg, fp);
3936 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); 3902 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
3937 I915_READ(dpll_reg); 3903 I915_READ(dpll_reg);
@@ -4026,7 +3992,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4026 } 3992 }
4027 } 3993 }
4028 3994
4029 if (!is_edp) { 3995 if (!has_edp_encoder) {
4030 I915_WRITE(fp_reg, fp); 3996 I915_WRITE(fp_reg, fp);
4031 I915_WRITE(dpll_reg, dpll); 3997 I915_WRITE(dpll_reg, dpll);
4032 I915_READ(dpll_reg); 3998 I915_READ(dpll_reg);
@@ -4105,7 +4071,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4105 I915_WRITE(link_m1_reg, m_n.link_m); 4071 I915_WRITE(link_m1_reg, m_n.link_m);
4106 I915_WRITE(link_n1_reg, m_n.link_n); 4072 I915_WRITE(link_n1_reg, m_n.link_n);
4107 4073
4108 if (is_edp) { 4074 if (has_edp_encoder) {
4109 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 4075 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
4110 } else { 4076 } else {
4111 /* enable FDI RX PLL too */ 4077 /* enable FDI RX PLL too */
@@ -4911,15 +4877,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
4911 kfree(intel_crtc); 4877 kfree(intel_crtc);
4912} 4878}
4913 4879
4914struct intel_unpin_work {
4915 struct work_struct work;
4916 struct drm_device *dev;
4917 struct drm_gem_object *old_fb_obj;
4918 struct drm_gem_object *pending_flip_obj;
4919 struct drm_pending_vblank_event *event;
4920 int pending;
4921};
4922
4923static void intel_unpin_work_fn(struct work_struct *__work) 4880static void intel_unpin_work_fn(struct work_struct *__work)
4924{ 4881{
4925 struct intel_unpin_work *work = 4882 struct intel_unpin_work *work =
@@ -5007,7 +4964,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
5007 4964
5008 spin_lock_irqsave(&dev->event_lock, flags); 4965 spin_lock_irqsave(&dev->event_lock, flags);
5009 if (intel_crtc->unpin_work) { 4966 if (intel_crtc->unpin_work) {
5010 intel_crtc->unpin_work->pending = 1; 4967 if ((++intel_crtc->unpin_work->pending) > 1)
4968 DRM_ERROR("Prepared flip multiple times\n");
5011 } else { 4969 } else {
5012 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); 4970 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
5013 } 4971 }
@@ -5026,9 +4984,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5026 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4984 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5027 struct intel_unpin_work *work; 4985 struct intel_unpin_work *work;
5028 unsigned long flags, offset; 4986 unsigned long flags, offset;
5029 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; 4987 int pipe = intel_crtc->pipe;
5030 int ret, pipesrc; 4988 u32 pf, pipesrc;
5031 u32 flip_mask; 4989 int ret;
5032 4990
5033 work = kzalloc(sizeof *work, GFP_KERNEL); 4991 work = kzalloc(sizeof *work, GFP_KERNEL);
5034 if (work == NULL) 4992 if (work == NULL)
@@ -5077,42 +5035,73 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5077 atomic_inc(&obj_priv->pending_flip); 5035 atomic_inc(&obj_priv->pending_flip);
5078 work->pending_flip_obj = obj; 5036 work->pending_flip_obj = obj;
5079 5037
5080 if (intel_crtc->plane)
5081 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5082 else
5083 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5084
5085 if (IS_GEN3(dev) || IS_GEN2(dev)) { 5038 if (IS_GEN3(dev) || IS_GEN2(dev)) {
5039 u32 flip_mask;
5040
5041 if (intel_crtc->plane)
5042 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5043 else
5044 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5045
5086 BEGIN_LP_RING(2); 5046 BEGIN_LP_RING(2);
5087 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 5047 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
5088 OUT_RING(0); 5048 OUT_RING(0);
5089 ADVANCE_LP_RING(); 5049 ADVANCE_LP_RING();
5090 } 5050 }
5091 5051
5052 work->enable_stall_check = true;
5053
5092 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5054 /* Offset into the new buffer for cases of shared fbs between CRTCs */
5093 offset = obj_priv->gtt_offset; 5055 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
5094 offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
5095 5056
5096 BEGIN_LP_RING(4); 5057 BEGIN_LP_RING(4);
5097 if (IS_I965G(dev)) { 5058 switch(INTEL_INFO(dev)->gen) {
5059 case 2:
5098 OUT_RING(MI_DISPLAY_FLIP | 5060 OUT_RING(MI_DISPLAY_FLIP |
5099 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5061 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5100 OUT_RING(fb->pitch); 5062 OUT_RING(fb->pitch);
5101 OUT_RING(offset | obj_priv->tiling_mode); 5063 OUT_RING(obj_priv->gtt_offset + offset);
5102 pipesrc = I915_READ(pipesrc_reg); 5064 OUT_RING(MI_NOOP);
5103 OUT_RING(pipesrc & 0x0fff0fff); 5065 break;
5104 } else if (IS_GEN3(dev)) { 5066
5067 case 3:
5105 OUT_RING(MI_DISPLAY_FLIP_I915 | 5068 OUT_RING(MI_DISPLAY_FLIP_I915 |
5106 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5069 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5107 OUT_RING(fb->pitch); 5070 OUT_RING(fb->pitch);
5108 OUT_RING(offset); 5071 OUT_RING(obj_priv->gtt_offset + offset);
5109 OUT_RING(MI_NOOP); 5072 OUT_RING(MI_NOOP);
5110 } else { 5073 break;
5074
5075 case 4:
5076 case 5:
5077 /* i965+ uses the linear or tiled offsets from the
5078 * Display Registers (which do not change across a page-flip)
5079 * so we need only reprogram the base address.
5080 */
5111 OUT_RING(MI_DISPLAY_FLIP | 5081 OUT_RING(MI_DISPLAY_FLIP |
5112 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5082 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5113 OUT_RING(fb->pitch); 5083 OUT_RING(fb->pitch);
5114 OUT_RING(offset); 5084 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
5115 OUT_RING(MI_NOOP); 5085
5086 /* XXX Enabling the panel-fitter across page-flip is so far
5087 * untested on non-native modes, so ignore it for now.
5088 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
5089 */
5090 pf = 0;
5091 pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
5092 OUT_RING(pf | pipesrc);
5093 break;
5094
5095 case 6:
5096 OUT_RING(MI_DISPLAY_FLIP |
5097 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5098 OUT_RING(fb->pitch | obj_priv->tiling_mode);
5099 OUT_RING(obj_priv->gtt_offset);
5100
5101 pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
5102 pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
5103 OUT_RING(pf | pipesrc);
5104 break;
5116 } 5105 }
5117 ADVANCE_LP_RING(); 5106 ADVANCE_LP_RING();
5118 5107
@@ -5193,7 +5182,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
5193 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 5182 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
5194 5183
5195 intel_crtc->cursor_addr = 0; 5184 intel_crtc->cursor_addr = 0;
5196 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; 5185 intel_crtc->dpms_mode = -1;
5197 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 5186 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
5198 5187
5199 intel_crtc->busy = false; 5188 intel_crtc->busy = false;
@@ -5701,6 +5690,9 @@ void intel_init_clock_gating(struct drm_device *dev)
5701 I915_WRITE(DISP_ARB_CTL, 5690 I915_WRITE(DISP_ARB_CTL,
5702 (I915_READ(DISP_ARB_CTL) | 5691 (I915_READ(DISP_ARB_CTL) |
5703 DISP_FBC_WM_DIS)); 5692 DISP_FBC_WM_DIS));
5693 I915_WRITE(WM3_LP_ILK, 0);
5694 I915_WRITE(WM2_LP_ILK, 0);
5695 I915_WRITE(WM1_LP_ILK, 0);
5704 } 5696 }
5705 /* 5697 /*
5706 * Based on the document from hardware guys the following bits 5698 * Based on the document from hardware guys the following bits
@@ -5722,8 +5714,7 @@ void intel_init_clock_gating(struct drm_device *dev)
5722 ILK_DPFC_DIS2 | 5714 ILK_DPFC_DIS2 |
5723 ILK_CLK_FBC); 5715 ILK_CLK_FBC);
5724 } 5716 }
5725 if (IS_GEN6(dev)) 5717 return;
5726 return;
5727 } else if (IS_G4X(dev)) { 5718 } else if (IS_G4X(dev)) {
5728 uint32_t dspclk_gate; 5719 uint32_t dspclk_gate;
5729 I915_WRITE(RENCLK_GATE_D1, 0); 5720 I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5784,11 +5775,9 @@ void intel_init_clock_gating(struct drm_device *dev)
5784 OUT_RING(MI_FLUSH); 5775 OUT_RING(MI_FLUSH);
5785 ADVANCE_LP_RING(); 5776 ADVANCE_LP_RING();
5786 } 5777 }
5787 } else { 5778 } else
5788 DRM_DEBUG_KMS("Failed to allocate render context." 5779 DRM_DEBUG_KMS("Failed to allocate render context."
5789 "Disable RC6\n"); 5780 "Disable RC6\n");
5790 return;
5791 }
5792 } 5781 }
5793 5782
5794 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { 5783 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
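
The page-flip hunk above replaces the old IS_I965G()/IS_GEN3() chain with a single switch on INTEL_INFO(dev)->gen that emits a different ring-command layout per generation. A minimal userland sketch of that dispatch shape, assuming a made-up emit() and simplified word layouts rather than the real MI_DISPLAY_FLIP packets:

#include <stdint.h>
#include <stdio.h>

static void emit(uint32_t dword)              /* stand-in for OUT_RING() */
{
	printf("ring <- 0x%08x\n", (unsigned)dword);
}

static int emit_flip(int gen, uint32_t pitch, uint32_t base, uint32_t tiling)
{
	switch (gen) {
	case 2:
	case 3:
		emit(pitch);
		emit(base);           /* older parts take the full byte offset */
		emit(0);              /* padding, like MI_NOOP */
		break;
	case 4:
	case 5:
		emit(pitch);
		emit(base | tiling);  /* display registers latch the rest */
		break;
	case 6:
		emit(pitch | tiling);
		emit(base);
		break;
	default:
		return -1;            /* generation we don't know how to flip */
	}
	return 0;
}

int main(void)
{
	return emit_flip(4, 4096, 0x100000, 0) ? 1 : 0;
}
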
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9caccd03dccb..9ab8708ac6ba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -239,7 +239,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
239 uint32_t ch_data = ch_ctl + 4; 239 uint32_t ch_data = ch_ctl + 4;
240 int i; 240 int i;
241 int recv_bytes; 241 int recv_bytes;
242 uint32_t ctl;
243 uint32_t status; 242 uint32_t status;
244 uint32_t aux_clock_divider; 243 uint32_t aux_clock_divider;
245 int try, precharge; 244 int try, precharge;
@@ -263,41 +262,43 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
263 else 262 else
264 precharge = 5; 263 precharge = 5;
265 264
265 if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
266 DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
267 I915_READ(ch_ctl));
268 return -EBUSY;
269 }
270
266 /* Must try at least 3 times according to DP spec */ 271 /* Must try at least 3 times according to DP spec */
267 for (try = 0; try < 5; try++) { 272 for (try = 0; try < 5; try++) {
268 /* Load the send data into the aux channel data registers */ 273 /* Load the send data into the aux channel data registers */
269 for (i = 0; i < send_bytes; i += 4) { 274 for (i = 0; i < send_bytes; i += 4)
270 uint32_t d = pack_aux(send + i, send_bytes - i); 275 I915_WRITE(ch_data + i,
271 276 pack_aux(send + i, send_bytes - i));
272 I915_WRITE(ch_data + i, d);
273 }
274
275 ctl = (DP_AUX_CH_CTL_SEND_BUSY |
276 DP_AUX_CH_CTL_TIME_OUT_400us |
277 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
278 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
279 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
280 DP_AUX_CH_CTL_DONE |
281 DP_AUX_CH_CTL_TIME_OUT_ERROR |
282 DP_AUX_CH_CTL_RECEIVE_ERROR);
283 277
284 /* Send the command and wait for it to complete */ 278 /* Send the command and wait for it to complete */
285 I915_WRITE(ch_ctl, ctl); 279 I915_WRITE(ch_ctl,
286 (void) I915_READ(ch_ctl); 280 DP_AUX_CH_CTL_SEND_BUSY |
281 DP_AUX_CH_CTL_TIME_OUT_400us |
282 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
283 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
284 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
285 DP_AUX_CH_CTL_DONE |
286 DP_AUX_CH_CTL_TIME_OUT_ERROR |
287 DP_AUX_CH_CTL_RECEIVE_ERROR);
287 for (;;) { 288 for (;;) {
288 udelay(100);
289 status = I915_READ(ch_ctl); 289 status = I915_READ(ch_ctl);
290 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 290 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
291 break; 291 break;
292 udelay(100);
292 } 293 }
293 294
294 /* Clear done status and any errors */ 295 /* Clear done status and any errors */
295 I915_WRITE(ch_ctl, (status | 296 I915_WRITE(ch_ctl,
296 DP_AUX_CH_CTL_DONE | 297 status |
297 DP_AUX_CH_CTL_TIME_OUT_ERROR | 298 DP_AUX_CH_CTL_DONE |
298 DP_AUX_CH_CTL_RECEIVE_ERROR)); 299 DP_AUX_CH_CTL_TIME_OUT_ERROR |
299 (void) I915_READ(ch_ctl); 300 DP_AUX_CH_CTL_RECEIVE_ERROR);
300 if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0) 301 if (status & DP_AUX_CH_CTL_DONE)
301 break; 302 break;
302 } 303 }
303 304
@@ -324,15 +325,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
324 /* Unload any bytes sent back from the other side */ 325 /* Unload any bytes sent back from the other side */
325 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 326 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
326 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 327 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
327
328 if (recv_bytes > recv_size) 328 if (recv_bytes > recv_size)
329 recv_bytes = recv_size; 329 recv_bytes = recv_size;
330 330
331 for (i = 0; i < recv_bytes; i += 4) { 331 for (i = 0; i < recv_bytes; i += 4)
332 uint32_t d = I915_READ(ch_data + i); 332 unpack_aux(I915_READ(ch_data + i),
333 333 recv + i, recv_bytes - i);
334 unpack_aux(d, recv + i, recv_bytes - i);
335 }
336 334
337 return recv_bytes; 335 return recv_bytes;
338} 336}
@@ -1140,18 +1138,14 @@ static bool
1140intel_dp_set_link_train(struct intel_dp *intel_dp, 1138intel_dp_set_link_train(struct intel_dp *intel_dp,
1141 uint32_t dp_reg_value, 1139 uint32_t dp_reg_value,
1142 uint8_t dp_train_pat, 1140 uint8_t dp_train_pat,
1143 uint8_t train_set[4], 1141 uint8_t train_set[4])
1144 bool first)
1145{ 1142{
1146 struct drm_device *dev = intel_dp->base.enc.dev; 1143 struct drm_device *dev = intel_dp->base.enc.dev;
1147 struct drm_i915_private *dev_priv = dev->dev_private; 1144 struct drm_i915_private *dev_priv = dev->dev_private;
1148 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
1149 int ret; 1145 int ret;
1150 1146
1151 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1147 I915_WRITE(intel_dp->output_reg, dp_reg_value);
1152 POSTING_READ(intel_dp->output_reg); 1148 POSTING_READ(intel_dp->output_reg);
1153 if (first)
1154 intel_wait_for_vblank(dev, intel_crtc->pipe);
1155 1149
1156 intel_dp_aux_native_write_1(intel_dp, 1150 intel_dp_aux_native_write_1(intel_dp,
1157 DP_TRAINING_PATTERN_SET, 1151 DP_TRAINING_PATTERN_SET,
@@ -1176,10 +1170,15 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1176 uint8_t voltage; 1170 uint8_t voltage;
1177 bool clock_recovery = false; 1171 bool clock_recovery = false;
1178 bool channel_eq = false; 1172 bool channel_eq = false;
1179 bool first = true;
1180 int tries; 1173 int tries;
1181 u32 reg; 1174 u32 reg;
1182 uint32_t DP = intel_dp->DP; 1175 uint32_t DP = intel_dp->DP;
1176 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
1177
1178 /* Enable output, wait for it to become active */
1179 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
1180 POSTING_READ(intel_dp->output_reg);
1181 intel_wait_for_vblank(dev, intel_crtc->pipe);
1183 1182
1184 /* Write the link configuration data */ 1183 /* Write the link configuration data */
1185 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1184 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1212,9 +1211,8 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1212 reg = DP | DP_LINK_TRAIN_PAT_1; 1211 reg = DP | DP_LINK_TRAIN_PAT_1;
1213 1212
1214 if (!intel_dp_set_link_train(intel_dp, reg, 1213 if (!intel_dp_set_link_train(intel_dp, reg,
1215 DP_TRAINING_PATTERN_1, train_set, first)) 1214 DP_TRAINING_PATTERN_1, train_set))
1216 break; 1215 break;
1217 first = false;
1218 /* Set training pattern 1 */ 1216 /* Set training pattern 1 */
1219 1217
1220 udelay(100); 1218 udelay(100);
@@ -1268,8 +1266,7 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1268 1266
1269 /* channel eq pattern */ 1267 /* channel eq pattern */
1270 if (!intel_dp_set_link_train(intel_dp, reg, 1268 if (!intel_dp_set_link_train(intel_dp, reg,
1271 DP_TRAINING_PATTERN_2, train_set, 1269 DP_TRAINING_PATTERN_2, train_set))
1272 false))
1273 break; 1270 break;
1274 1271
1275 udelay(400); 1272 udelay(400);
@@ -1388,7 +1385,7 @@ ironlake_dp_detect(struct drm_connector *connector)
1388 * \return false if DP port is disconnected. 1385 * \return false if DP port is disconnected.
1389 */ 1386 */
1390static enum drm_connector_status 1387static enum drm_connector_status
1391intel_dp_detect(struct drm_connector *connector) 1388intel_dp_detect(struct drm_connector *connector, bool force)
1392{ 1389{
1393 struct drm_encoder *encoder = intel_attached_encoder(connector); 1390 struct drm_encoder *encoder = intel_attached_encoder(connector);
1394 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1391 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
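
The intel_dp_aux_ch() hunks above keep the retry/poll structure: load the data registers, kick the transfer with SEND_BUSY, poll until the busy bit drops, write the sticky status back to clear it, and accept the attempt only when DONE is set, retrying up to five times (DP requires at least three). A runnable sketch of that shape, with a fake register standing in for I915_READ/I915_WRITE and invented bit names:

#include <stdint.h>
#include <stdio.h>

#define CTL_SEND_BUSY (1u << 31)
#define CTL_DONE      (1u << 30)

/* Fake "register" so the sketch runs: kicking a transfer completes it
 * immediately, and writing 1s clears the sticky status bits. */
static uint32_t fake_status;

static void reg_write(uint32_t v)
{
	if (v & CTL_SEND_BUSY)
		fake_status = CTL_DONE;
	else
		fake_status &= ~v;
}

static uint32_t reg_read(void) { return fake_status; }
static void delay_us(unsigned us) { (void)us; }   /* stand-in for udelay() */

static uint32_t aux_transfer(uint32_t cmd)
{
	for (int attempt = 0; attempt < 5; attempt++) {  /* DP wants >= 3 tries */
		uint32_t status;

		reg_write(cmd | CTL_SEND_BUSY);
		for (;;) {
			status = reg_read();
			if (!(status & CTL_SEND_BUSY))
				break;
			delay_us(100);
		}

		reg_write(status);          /* write-1-to-clear the latched bits */
		if (status & CTL_DONE)
			return status;
	}
	return 0;                           /* transfer never completed */
}

int main(void)
{
	printf("status 0x%08x\n", (unsigned)aux_transfer(0));
	return 0;
}
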
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0e92aa07b382..8828b3ac6414 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -176,6 +176,16 @@ struct intel_crtc {
176#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) 176#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
177#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 177#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
178 178
179struct intel_unpin_work {
180 struct work_struct work;
181 struct drm_device *dev;
182 struct drm_gem_object *old_fb_obj;
183 struct drm_gem_object *pending_flip_obj;
184 struct drm_pending_vblank_event *event;
185 int pending;
186 bool enable_stall_check;
187};
188
179struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, 189struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
180 const char *name); 190 const char *name);
181void intel_i2c_destroy(struct i2c_adapter *adapter); 191void intel_i2c_destroy(struct i2c_adapter *adapter);
@@ -219,7 +229,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
219 struct drm_crtc *crtc); 229 struct drm_crtc *crtc);
220int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 230int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
221 struct drm_file *file_priv); 231 struct drm_file *file_priv);
222extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
223extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 232extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
224extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 233extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
225extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 234extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a399f4b2c1c5..7c9ec1472d46 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -221,7 +221,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
221 * 221 *
222 * Unimplemented. 222 * Unimplemented.
223 */ 223 */
224static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) 224static enum drm_connector_status
225intel_dvo_detect(struct drm_connector *connector, bool force)
225{ 226{
226 struct drm_encoder *encoder = intel_attached_encoder(connector); 227 struct drm_encoder *encoder = intel_attached_encoder(connector);
227 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); 228 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7bdc96256bf5..b61966c126d3 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -237,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev,
237 drm_fb_helper_fini(&ifbdev->helper); 237 drm_fb_helper_fini(&ifbdev->helper);
238 238
239 drm_framebuffer_cleanup(&ifb->base); 239 drm_framebuffer_cleanup(&ifb->base);
240 if (ifb->obj) 240 if (ifb->obj) {
241 drm_gem_object_unreference(ifb->obj); 241 drm_gem_object_unreference(ifb->obj);
242 ifb->obj = NULL;
243 }
242 244
243 return 0; 245 return 0;
244} 246}
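
The intel_fb.c change above drops the GEM reference and then clears ifb->obj, so a later cleanup pass cannot drop the same reference twice. A tiny sketch of that idiom, assuming an invented struct obj and obj_put() rather than the DRM/GEM calls:

#include <stdlib.h>

struct obj { int refcount; };

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);
}

static void fb_cleanup(struct obj **slot)
{
	if (*slot) {
		obj_put(*slot);
		*slot = NULL;   /* a second cleanup call is now a harmless no-op */
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	if (!o)
		return 1;
	o->refcount = 1;
	fb_cleanup(&o);
	fb_cleanup(&o);         /* safe: pointer was cleared the first time */
	return 0;
}
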
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ccd4c97e6524..926934a482ec 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -139,7 +139,7 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
139} 139}
140 140
141static enum drm_connector_status 141static enum drm_connector_status
142intel_hdmi_detect(struct drm_connector *connector) 142intel_hdmi_detect(struct drm_connector *connector, bool force)
143{ 143{
144 struct drm_encoder *encoder = intel_attached_encoder(connector); 144 struct drm_encoder *encoder = intel_attached_encoder(connector);
145 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 145 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b819c1081147..6ec39a86ed06 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -445,7 +445,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
445 * connected and closed means disconnected. We also send hotplug events as 445 * connected and closed means disconnected. We also send hotplug events as
446 * needed, using lid status notification from the input layer. 446 * needed, using lid status notification from the input layer.
447 */ 447 */
448static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) 448static enum drm_connector_status
449intel_lvds_detect(struct drm_connector *connector, bool force)
449{ 450{
450 struct drm_device *dev = connector->dev; 451 struct drm_device *dev = connector->dev;
451 enum drm_connector_status status = connector_status_connected; 452 enum drm_connector_status status = connector_status_connected;
@@ -540,7 +541,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
540 * the LID nofication event. 541 * the LID nofication event.
541 */ 542 */
542 if (connector) 543 if (connector)
543 connector->status = connector->funcs->detect(connector); 544 connector->status = connector->funcs->detect(connector,
545 false);
546
544 /* Don't force modeset on machines where it causes a GPU lockup */ 547 /* Don't force modeset on machines where it causes a GPU lockup */
545 if (dmi_check_system(intel_no_modeset_on_lid)) 548 if (dmi_check_system(intel_no_modeset_on_lid))
546 return NOTIFY_OK; 549 return NOTIFY_OK;
@@ -875,8 +878,6 @@ void intel_lvds_init(struct drm_device *dev)
875 878
876 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 879 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
877 intel_encoder->crtc_mask = (1 << 1); 880 intel_encoder->crtc_mask = (1 << 1);
878 if (IS_I965G(dev))
879 intel_encoder->crtc_mask |= (1 << 0);
880 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); 881 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
881 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 882 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
882 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 883 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
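
The detect() callbacks in this series all grow a force flag: background callers such as the lid notifier pass false so detection stays cheap and non-destructive, while a user-requested probe passes true and may do a full load-detect cycle (see the intel_tv.c hunk further down). A small sketch of that convention, with placeholder names rather than the drm_connector_funcs interface:

#include <stdbool.h>
#include <stdio.h>

enum status { DISCONNECTED, CONNECTED, UNKNOWN };

struct connector {
	enum status last;
	enum status (*detect)(struct connector *c, bool force);
};

/* An output that needs an expensive load-detect cycle to probe. */
static enum status tv_detect(struct connector *c, bool force)
{
	if (!force)
		return c->last;   /* polling path: don't disturb the hardware */
	c->last = CONNECTED;      /* pretend the forced probe found something */
	return c->last;
}

int main(void)
{
	struct connector tv = { .last = UNKNOWN, .detect = tv_detect };
	printf("poll:   %d\n", tv.detect(&tv, false));  /* cheap, cached */
	printf("forced: %d\n", tv.detect(&tv, true));   /* full probe */
	return 0;
}
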
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 4f00390d7c61..1d306a458be6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,6 +25,8 @@
25 * 25 *
26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c 26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
27 */ 27 */
28
29#include <linux/seq_file.h>
28#include "drmP.h" 30#include "drmP.h"
29#include "drm.h" 31#include "drm.h"
30#include "i915_drm.h" 32#include "i915_drm.h"
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 51e9c9e718c4..cb3508f78bc3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -220,9 +220,13 @@ static int init_render_ring(struct drm_device *dev,
220{ 220{
221 drm_i915_private_t *dev_priv = dev->dev_private; 221 drm_i915_private_t *dev_priv = dev->dev_private;
222 int ret = init_ring_common(dev, ring); 222 int ret = init_ring_common(dev, ring);
223 int mode;
224
223 if (IS_I9XX(dev) && !IS_GEN3(dev)) { 225 if (IS_I9XX(dev) && !IS_GEN3(dev)) {
224 I915_WRITE(MI_MODE, 226 mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
225 (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); 227 if (IS_GEN6(dev))
228 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
229 I915_WRITE(MI_MODE, mode);
226 } 230 }
227 return ret; 231 return ret;
228} 232}
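
The MI_MODE hunk builds its value as "bit << 16 | bit" because, as far as I understand these registers, the upper 16 bits act as a write-enable mask for the lower 16: only bits whose mask bit is set actually change. A sketch of that masked-write pattern under that assumption; masked_enable()/masked_write() are illustrative helpers, not kernel macros:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t masked_enable(uint32_t bit)  { return (bit << 16) | bit; }
static inline uint32_t masked_disable(uint32_t bit) { return  bit << 16; }

/* Emulate the hardware's commit rule for a masked register. */
static uint32_t masked_write(uint32_t current, uint32_t value)
{
	uint32_t mask = value >> 16;
	return (current & ~mask) | (value & mask & 0xffff);
}

int main(void)
{
	uint32_t reg = 0;
	reg = masked_write(reg, masked_enable(1u << 6));   /* set bit 6 only */
	reg = masked_write(reg, masked_disable(1u << 3));  /* clear bit 3, touch nothing else */
	printf("reg = 0x%08x\n", (unsigned)reg);           /* prints 0x00000040 */
	return 0;
}
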
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 093e914e8a41..ee73e428a84a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1061,8 +1061,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1061 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) 1061 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
1062 return false; 1062 return false;
1063 1063
1064 if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode)) 1064 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
1065 return false; 1065 mode,
1066 adjusted_mode);
1066 } else if (intel_sdvo->is_lvds) { 1067 } else if (intel_sdvo->is_lvds) {
1067 drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); 1068 drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
1068 1069
@@ -1070,8 +1071,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1070 intel_sdvo->sdvo_lvds_fixed_mode)) 1071 intel_sdvo->sdvo_lvds_fixed_mode))
1071 return false; 1072 return false;
1072 1073
1073 if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode)) 1074 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
1074 return false; 1075 mode,
1076 adjusted_mode);
1075 } 1077 }
1076 1078
1077 /* Make the CRTC code factor in the SDVO pixel multiplier. The 1079 /* Make the CRTC code factor in the SDVO pixel multiplier. The
@@ -1108,10 +1110,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1108 in_out.in0 = intel_sdvo->attached_output; 1110 in_out.in0 = intel_sdvo->attached_output;
1109 in_out.in1 = 0; 1111 in_out.in1 = 0;
1110 1112
1111 if (!intel_sdvo_set_value(intel_sdvo, 1113 intel_sdvo_set_value(intel_sdvo,
1112 SDVO_CMD_SET_IN_OUT_MAP, 1114 SDVO_CMD_SET_IN_OUT_MAP,
1113 &in_out, sizeof(in_out))) 1115 &in_out, sizeof(in_out));
1114 return;
1115 1116
1116 if (intel_sdvo->is_hdmi) { 1117 if (intel_sdvo->is_hdmi) {
1117 if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) 1118 if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
@@ -1122,11 +1123,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1122 1123
1123 /* We have tried to get input timing in mode_fixup, and filled into 1124 /* We have tried to get input timing in mode_fixup, and filled into
1124 adjusted_mode */ 1125 adjusted_mode */
1125 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { 1126 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1126 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1127 if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
1127 input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags; 1128 input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
1128 } else
1129 intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
1130 1129
1131 /* If it's a TV, we already set the output timing in mode_fixup. 1130 /* If it's a TV, we already set the output timing in mode_fixup.
1132 * Otherwise, the output timing is equal to the input timing. 1131 * Otherwise, the output timing is equal to the input timing.
@@ -1137,8 +1136,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1137 intel_sdvo->attached_output)) 1136 intel_sdvo->attached_output))
1138 return; 1137 return;
1139 1138
1140 if (!intel_sdvo_set_output_timing(intel_sdvo, &input_dtd)) 1139 (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
1141 return;
1142 } 1140 }
1143 1141
1144 /* Set the input timing to the screen. Assume always input 0. */ 1142 /* Set the input timing to the screen. Assume always input 0. */
@@ -1165,8 +1163,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1165 intel_sdvo_set_input_timing(encoder, &input_dtd); 1163 intel_sdvo_set_input_timing(encoder, &input_dtd);
1166 } 1164 }
1167#else 1165#else
1168 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) 1166 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
1169 return;
1170#endif 1167#endif
1171 1168
1172 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); 1169 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
@@ -1420,7 +1417,7 @@ intel_analog_is_connected(struct drm_device *dev)
1420 if (!analog_connector) 1417 if (!analog_connector)
1421 return false; 1418 return false;
1422 1419
1423 if (analog_connector->funcs->detect(analog_connector) == 1420 if (analog_connector->funcs->detect(analog_connector, false) ==
1424 connector_status_disconnected) 1421 connector_status_disconnected)
1425 return false; 1422 return false;
1426 1423
@@ -1489,7 +1486,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1489 return status; 1486 return status;
1490} 1487}
1491 1488
1492static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) 1489static enum drm_connector_status
1490intel_sdvo_detect(struct drm_connector *connector, bool force)
1493{ 1491{
1494 uint16_t response; 1492 uint16_t response;
1495 struct drm_encoder *encoder = intel_attached_encoder(connector); 1493 struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -1932,6 +1930,41 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
1932 .destroy = intel_sdvo_enc_destroy, 1930 .destroy = intel_sdvo_enc_destroy,
1933}; 1931};
1934 1932
1933static void
1934intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
1935{
1936 uint16_t mask = 0;
1937 unsigned int num_bits;
1938
1939 /* Make a mask of outputs less than or equal to our own priority in the
1940 * list.
1941 */
1942 switch (sdvo->controlled_output) {
1943 case SDVO_OUTPUT_LVDS1:
1944 mask |= SDVO_OUTPUT_LVDS1;
1945 case SDVO_OUTPUT_LVDS0:
1946 mask |= SDVO_OUTPUT_LVDS0;
1947 case SDVO_OUTPUT_TMDS1:
1948 mask |= SDVO_OUTPUT_TMDS1;
1949 case SDVO_OUTPUT_TMDS0:
1950 mask |= SDVO_OUTPUT_TMDS0;
1951 case SDVO_OUTPUT_RGB1:
1952 mask |= SDVO_OUTPUT_RGB1;
1953 case SDVO_OUTPUT_RGB0:
1954 mask |= SDVO_OUTPUT_RGB0;
1955 break;
1956 }
1957
1958 /* Count bits to find what number we are in the priority list. */
1959 mask &= sdvo->caps.output_flags;
1960 num_bits = hweight16(mask);
1961 /* If more than 3 outputs, default to DDC bus 3 for now. */
1962 if (num_bits > 3)
1963 num_bits = 3;
1964
1965 /* Corresponds to SDVO_CONTROL_BUS_DDCx */
1966 sdvo->ddc_bus = 1 << num_bits;
1967}
1935 1968
1936/** 1969/**
1937 * Choose the appropriate DDC bus for control bus switch command for this 1970 * Choose the appropriate DDC bus for control bus switch command for this
@@ -1951,7 +1984,10 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
1951 else 1984 else
1952 mapping = &(dev_priv->sdvo_mappings[1]); 1985 mapping = &(dev_priv->sdvo_mappings[1]);
1953 1986
1954 sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); 1987 if (mapping->initialized)
1988 sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
1989 else
1990 intel_sdvo_guess_ddc_bus(sdvo);
1955} 1991}
1956 1992
1957static bool 1993static bool
@@ -2134,8 +2170,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2134 return true; 2170 return true;
2135 2171
2136err: 2172err:
2137 intel_sdvo_destroy_enhance_property(connector); 2173 intel_sdvo_destroy(connector);
2138 kfree(intel_sdvo_connector);
2139 return false; 2174 return false;
2140} 2175}
2141 2176
@@ -2207,8 +2242,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2207 return true; 2242 return true;
2208 2243
2209err: 2244err:
2210 intel_sdvo_destroy_enhance_property(connector); 2245 intel_sdvo_destroy(connector);
2211 kfree(intel_sdvo_connector);
2212 return false; 2246 return false;
2213} 2247}
2214 2248
@@ -2486,11 +2520,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
2486 uint16_t response; 2520 uint16_t response;
2487 } enhancements; 2521 } enhancements;
2488 2522
2489 if (!intel_sdvo_get_value(intel_sdvo, 2523 enhancements.response = 0;
2490 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2524 intel_sdvo_get_value(intel_sdvo,
2491 &enhancements, sizeof(enhancements))) 2525 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
2492 return false; 2526 &enhancements, sizeof(enhancements));
2493
2494 if (enhancements.response == 0) { 2527 if (enhancements.response == 0) {
2495 DRM_DEBUG_KMS("No enhancement is supported\n"); 2528 DRM_DEBUG_KMS("No enhancement is supported\n");
2496 return true; 2529 return true;
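
The new intel_sdvo_guess_ddc_bus() above relies on deliberate switch fall-through: the mask accumulates every output type at or below this output's priority, and hweight16() of the masked capability flags gives the output's position, which selects the DDC bus. A self-contained sketch of that trick, with invented OUT_* flags standing in for the SDVO_OUTPUT_* bits:

#include <stdint.h>
#include <stdio.h>

enum { OUT_RGB0 = 1 << 0, OUT_RGB1 = 1 << 1,
       OUT_TMDS0 = 1 << 2, OUT_TMDS1 = 1 << 3,
       OUT_LVDS0 = 1 << 4, OUT_LVDS1 = 1 << 5 };

static unsigned popcount16(uint16_t v)      /* stand-in for hweight16() */
{
	unsigned n = 0;
	for (; v; v &= v - 1)
		n++;
	return n;
}

static unsigned guess_ddc_bus(uint16_t my_output, uint16_t present_outputs)
{
	uint16_t mask = 0;

	switch (my_output) {   /* deliberate fall-through, highest priority first */
	case OUT_LVDS1: mask |= OUT_LVDS1;
	case OUT_LVDS0: mask |= OUT_LVDS0;
	case OUT_TMDS1: mask |= OUT_TMDS1;
	case OUT_TMDS0: mask |= OUT_TMDS0;
	case OUT_RGB1:  mask |= OUT_RGB1;
	case OUT_RGB0:  mask |= OUT_RGB0;
		break;
	}

	unsigned pos = popcount16(mask & present_outputs);
	if (pos > 3)            /* cap at bus 3, as the patch does */
		pos = 3;
	return 1u << pos;       /* corresponds to "DDC bus n" */
}

int main(void)
{
	/* board with one RGB and one TMDS output: the TMDS one lands on 1 << 2 */
	printf("bus mask = %u\n", guess_ddc_bus(OUT_TMDS0, OUT_RGB0 | OUT_TMDS0));
	return 0;
}
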
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d2029efee982..4a117e318a73 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1231,7 +1231,6 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1231 struct drm_encoder *encoder = &intel_tv->base.enc; 1231 struct drm_encoder *encoder = &intel_tv->base.enc;
1232 struct drm_device *dev = encoder->dev; 1232 struct drm_device *dev = encoder->dev;
1233 struct drm_i915_private *dev_priv = dev->dev_private; 1233 struct drm_i915_private *dev_priv = dev->dev_private;
1234 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
1235 unsigned long irqflags; 1234 unsigned long irqflags;
1236 u32 tv_ctl, save_tv_ctl; 1235 u32 tv_ctl, save_tv_ctl;
1237 u32 tv_dac, save_tv_dac; 1236 u32 tv_dac, save_tv_dac;
@@ -1268,11 +1267,15 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1268 DAC_C_0_7_V); 1267 DAC_C_0_7_V);
1269 I915_WRITE(TV_CTL, tv_ctl); 1268 I915_WRITE(TV_CTL, tv_ctl);
1270 I915_WRITE(TV_DAC, tv_dac); 1269 I915_WRITE(TV_DAC, tv_dac);
1271 intel_wait_for_vblank(dev, intel_crtc->pipe); 1270 POSTING_READ(TV_DAC);
1271 msleep(20);
1272
1272 tv_dac = I915_READ(TV_DAC); 1273 tv_dac = I915_READ(TV_DAC);
1273 I915_WRITE(TV_DAC, save_tv_dac); 1274 I915_WRITE(TV_DAC, save_tv_dac);
1274 I915_WRITE(TV_CTL, save_tv_ctl); 1275 I915_WRITE(TV_CTL, save_tv_ctl);
1275 intel_wait_for_vblank(dev, intel_crtc->pipe); 1276 POSTING_READ(TV_CTL);
1277 msleep(20);
1278
1276 /* 1279 /*
1277 * A B C 1280 * A B C
1278 * 0 1 1 Composite 1281 * 0 1 1 Composite
@@ -1338,7 +1341,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
1338 * we have a pipe programmed in order to probe the TV. 1341 * we have a pipe programmed in order to probe the TV.
1339 */ 1342 */
1340static enum drm_connector_status 1343static enum drm_connector_status
1341intel_tv_detect(struct drm_connector *connector) 1344intel_tv_detect(struct drm_connector *connector, bool force)
1342{ 1345{
1343 struct drm_display_mode mode; 1346 struct drm_display_mode mode;
1344 struct drm_encoder *encoder = intel_attached_encoder(connector); 1347 struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -1350,7 +1353,7 @@ intel_tv_detect(struct drm_connector *connector)
1350 1353
1351 if (encoder->crtc && encoder->crtc->enabled) { 1354 if (encoder->crtc && encoder->crtc->enabled) {
1352 type = intel_tv_detect_type(intel_tv); 1355 type = intel_tv_detect_type(intel_tv);
1353 } else { 1356 } else if (force) {
1354 struct drm_crtc *crtc; 1357 struct drm_crtc *crtc;
1355 int dpms_mode; 1358 int dpms_mode;
1356 1359
@@ -1361,10 +1364,9 @@ intel_tv_detect(struct drm_connector *connector)
1361 intel_release_load_detect_pipe(&intel_tv->base, connector, 1364 intel_release_load_detect_pipe(&intel_tv->base, connector,
1362 dpms_mode); 1365 dpms_mode);
1363 } else 1366 } else
1364 type = -1; 1367 return connector_status_unknown;
1365 } 1368 } else
1366 1369 return connector->status;
1367 intel_tv->type = type;
1368 1370
1369 if (type < 0) 1371 if (type < 0)
1370 return connector_status_disconnected; 1372 return connector_status_disconnected;
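
The TV-detect hunk swaps the vblank waits for POSTING_READ plus msleep(20): read the register back so the write cannot linger in a write buffer, then give the DAC a fixed settle time before sampling. A minimal sketch of that posting-read idiom, with mmio_write/mmio_read/settle_ms as placeholders for writel/readl/msleep:

#include <stdint.h>

static inline void mmio_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }
static inline uint32_t mmio_read(volatile uint32_t *reg) { return *reg; }
static void settle_ms(unsigned ms) { (void)ms; }    /* stand-in for msleep() */

static uint32_t sense_dac(volatile uint32_t *dac, uint32_t probe_value)
{
	mmio_write(dac, probe_value);
	(void)mmio_read(dac);   /* posting read: flush the write to the device */
	settle_ms(20);          /* let the analogue side settle before sampling */
	return mmio_read(dac);
}

int main(void)
{
	static uint32_t fake_dac;              /* fake register for the example */
	return sense_dac(&fake_dac, 0x7) == 0x7 ? 0 : 1;
}
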
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index fff82045c427..9ce2827f8c00 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1085,19 +1085,19 @@ file_priv)
1085} 1085}
1086 1086
1087struct drm_ioctl_desc mga_ioctls[] = { 1087struct drm_ioctl_desc mga_ioctls[] = {
1088 DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1088 DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1089 DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH), 1089 DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
1090 DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH), 1090 DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
1091 DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH), 1091 DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH),
1092 DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH), 1092 DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH),
1093 DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH), 1093 DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
1094 DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH), 1094 DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH),
1095 DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH), 1095 DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH),
1096 DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH), 1096 DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH),
1097 DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH), 1097 DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH),
1098 DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH), 1098 DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
1099 DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), 1099 DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
1100 DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1100 DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1101}; 1101};
1102 1102
1103int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); 1103int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0b69a9628c95..974b0f8ae048 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2166,7 +2166,7 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb,
2166 uint32_t val = 0; 2166 uint32_t val = 0;
2167 2167
2168 if (off < pci_resource_len(dev->pdev, 1)) { 2168 if (off < pci_resource_len(dev->pdev, 1)) {
2169 uint32_t __iomem *p = 2169 uint8_t __iomem *p =
2170 io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); 2170 io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
2171 2171
2172 val = ioread32(p + (off & ~PAGE_MASK)); 2172 val = ioread32(p + (off & ~PAGE_MASK));
@@ -2182,7 +2182,7 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb,
2182 uint32_t off, uint32_t val) 2182 uint32_t off, uint32_t val)
2183{ 2183{
2184 if (off < pci_resource_len(dev->pdev, 1)) { 2184 if (off < pci_resource_len(dev->pdev, 1)) {
2185 uint32_t __iomem *p = 2185 uint8_t __iomem *p =
2186 io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); 2186 io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
2187 2187
2188 iowrite32(val, p + (off & ~PAGE_MASK)); 2188 iowrite32(val, p + (off & ~PAGE_MASK));
@@ -3869,27 +3869,10 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
3869 } 3869 }
3870#ifdef __powerpc__ 3870#ifdef __powerpc__
3871 /* Powerbook specific quirks */ 3871 /* Powerbook specific quirks */
3872 if ((dev->pci_device & 0xffff) == 0x0179 || 3872 if (script == LVDS_RESET &&
3873 (dev->pci_device & 0xffff) == 0x0189 || 3873 (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
3874 (dev->pci_device & 0xffff) == 0x0329) { 3874 dev->pci_device == 0x0329))
3875 if (script == LVDS_RESET) { 3875 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
3876 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
3877
3878 } else if (script == LVDS_PANEL_ON) {
3879 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3880 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3881 | (1 << 31));
3882 bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3883 bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
3884
3885 } else if (script == LVDS_PANEL_OFF) {
3886 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3887 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3888 & ~(1 << 31));
3889 bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3890 bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
3891 }
3892 }
3893#endif 3876#endif
3894 3877
3895 return 0; 3878 return 0;
@@ -4381,11 +4364,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
4381 * 4364 *
4382 * For the moment, a quirk will do :) 4365 * For the moment, a quirk will do :)
4383 */ 4366 */
4384 if ((dev->pdev->device == 0x01d7) && 4367 if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
4385 (dev->pdev->subsystem_vendor == 0x1028) &&
4386 (dev->pdev->subsystem_device == 0x01c2)) {
4387 bios->fp.duallink_transition_clk = 80000; 4368 bios->fp.duallink_transition_clk = 80000;
4388 }
4389 4369
4390 /* set dual_link flag for EDID case */ 4370 /* set dual_link flag for EDID case */
4391 if (pxclk && (chip_version < 0x25 || chip_version > 0x28)) 4371 if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
@@ -4587,7 +4567,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
4587 return 1; 4567 return 1;
4588 } 4568 }
4589 4569
4590 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); 4570 NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
4591 nouveau_bios_run_init_table(dev, script, dcbent); 4571 nouveau_bios_run_init_table(dev, script, dcbent);
4592 } else 4572 } else
4593 if (pxclk == -1) { 4573 if (pxclk == -1) {
@@ -4597,7 +4577,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
4597 return 1; 4577 return 1;
4598 } 4578 }
4599 4579
4600 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); 4580 NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
4601 nouveau_bios_run_init_table(dev, script, dcbent); 4581 nouveau_bios_run_init_table(dev, script, dcbent);
4602 } else 4582 } else
4603 if (pxclk == -2) { 4583 if (pxclk == -2) {
@@ -4610,7 +4590,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
4610 return 1; 4590 return 1;
4611 } 4591 }
4612 4592
4613 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); 4593 NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
4614 nouveau_bios_run_init_table(dev, script, dcbent); 4594 nouveau_bios_run_init_table(dev, script, dcbent);
4615 } else 4595 } else
4616 if (pxclk > 0) { 4596 if (pxclk > 0) {
@@ -4622,7 +4602,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
4622 return 1; 4602 return 1;
4623 } 4603 }
4624 4604
4625 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); 4605 NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
4626 nouveau_bios_run_init_table(dev, script, dcbent); 4606 nouveau_bios_run_init_table(dev, script, dcbent);
4627 } else 4607 } else
4628 if (pxclk < 0) { 4608 if (pxclk < 0) {
@@ -4634,7 +4614,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
4634 return 1; 4614 return 1;
4635 } 4615 }
4636 4616
4637 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); 4617 NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
4638 nouveau_bios_run_init_table(dev, script, dcbent); 4618 nouveau_bios_run_init_table(dev, script, dcbent);
4639 } 4619 }
4640 4620
@@ -5357,19 +5337,17 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
5357 } 5337 }
5358 5338
5359 tmdstableptr = ROM16(bios->data[bitentry->offset]); 5339 tmdstableptr = ROM16(bios->data[bitentry->offset]);
5360 5340 if (!tmdstableptr) {
5361 if (tmdstableptr == 0x0) {
5362 NV_ERROR(dev, "Pointer to TMDS table invalid\n"); 5341 NV_ERROR(dev, "Pointer to TMDS table invalid\n");
5363 return -EINVAL; 5342 return -EINVAL;
5364 } 5343 }
5365 5344
5345 NV_INFO(dev, "TMDS table version %d.%d\n",
5346 bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
5347
5366 /* nv50+ has v2.0, but we don't parse it atm */ 5348 /* nv50+ has v2.0, but we don't parse it atm */
5367 if (bios->data[tmdstableptr] != 0x11) { 5349 if (bios->data[tmdstableptr] != 0x11)
5368 NV_WARN(dev,
5369 "TMDS table revision %d.%d not currently supported\n",
5370 bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
5371 return -ENOSYS; 5350 return -ENOSYS;
5372 }
5373 5351
5374 /* 5352 /*
5375 * These two scripts are odd: they don't seem to get run even when 5353 * These two scripts are odd: they don't seem to get run even when
@@ -5809,6 +5787,20 @@ parse_dcb_gpio_table(struct nvbios *bios)
5809 gpio->line = tvdac_gpio[1] >> 4; 5787 gpio->line = tvdac_gpio[1] >> 4;
5810 gpio->invert = tvdac_gpio[0] & 2; 5788 gpio->invert = tvdac_gpio[0] & 2;
5811 } 5789 }
5790 } else {
5791 /*
5792 * No systematic way to store GPIO info on pre-v2.2
5793 * DCBs, try to match the PCI device IDs.
5794 */
5795
5796 /* Apple iMac G4 NV18 */
5797 if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
5798 struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
5799
5800 gpio->tag = DCB_GPIO_TVDAC0;
5801 gpio->line = 4;
5802 }
5803
5812 } 5804 }
5813 5805
5814 if (!gpio_table_ptr) 5806 if (!gpio_table_ptr)
@@ -5884,9 +5876,7 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
5884 struct drm_device *dev = bios->dev; 5876 struct drm_device *dev = bios->dev;
5885 5877
5886 /* Gigabyte NX85T */ 5878 /* Gigabyte NX85T */
5887 if ((dev->pdev->device == 0x0421) && 5879 if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
5888 (dev->pdev->subsystem_vendor == 0x1458) &&
5889 (dev->pdev->subsystem_device == 0x344c)) {
5890 if (cte->type == DCB_CONNECTOR_HDMI_1) 5880 if (cte->type == DCB_CONNECTOR_HDMI_1)
5891 cte->type = DCB_CONNECTOR_DVI_I; 5881 cte->type = DCB_CONNECTOR_DVI_I;
5892 } 5882 }
@@ -6139,7 +6129,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
6139 entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4; 6129 entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
6140 6130
6141 break; 6131 break;
6142 case 0xe: 6132 case OUTPUT_EOL:
6143 /* weird g80 mobile type that "nv" treats as a terminator */ 6133 /* weird g80 mobile type that "nv" treats as a terminator */
6144 dcb->entries--; 6134 dcb->entries--;
6145 return false; 6135 return false;
@@ -6176,22 +6166,14 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
6176 entry->type = OUTPUT_TV; 6166 entry->type = OUTPUT_TV;
6177 break; 6167 break;
6178 case 2: 6168 case 2:
6179 case 3:
6180 entry->type = OUTPUT_LVDS;
6181 break;
6182 case 4: 6169 case 4:
6183 switch ((conn & 0x000000f0) >> 4) { 6170 if (conn & 0x10)
6184 case 0:
6185 entry->type = OUTPUT_TMDS;
6186 break;
6187 case 1:
6188 entry->type = OUTPUT_LVDS; 6171 entry->type = OUTPUT_LVDS;
6189 break; 6172 else
6190 default: 6173 entry->type = OUTPUT_TMDS;
6191 NV_ERROR(dev, "Unknown DCB subtype 4/%d\n", 6174 break;
6192 (conn & 0x000000f0) >> 4); 6175 case 3:
6193 return false; 6176 entry->type = OUTPUT_LVDS;
6194 }
6195 break; 6177 break;
6196 default: 6178 default:
6197 NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f); 6179 NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
@@ -6307,9 +6289,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
6307 * nasty problems until this is sorted (assuming it's not a 6289 * nasty problems until this is sorted (assuming it's not a
6308 * VBIOS bug). 6290 * VBIOS bug).
6309 */ 6291 */
6310 if ((dev->pdev->device == 0x040d) && 6292 if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) {
6311 (dev->pdev->subsystem_vendor == 0x1028) &&
6312 (dev->pdev->subsystem_device == 0x019b)) {
6313 if (*conn == 0x02026312 && *conf == 0x00000020) 6293 if (*conn == 0x02026312 && *conf == 0x00000020)
6314 return false; 6294 return false;
6315 } 6295 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index fd14dfd3d780..c1de2f3fcb0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -95,6 +95,7 @@ enum dcb_type {
95 OUTPUT_TMDS = 2, 95 OUTPUT_TMDS = 2,
96 OUTPUT_LVDS = 3, 96 OUTPUT_LVDS = 3,
97 OUTPUT_DP = 6, 97 OUTPUT_DP = 6,
98 OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
98 OUTPUT_ANY = -1 99 OUTPUT_ANY = -1
99}; 100};
100 101
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 84f85183d041..f6f44779d82f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -36,6 +36,21 @@
36#include <linux/log2.h> 36#include <linux/log2.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38 38
39int
40nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
41{
42 struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
43 int ret;
44
45 if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
46 return 0;
47
48 spin_lock(&nvbo->bo.lock);
49 ret = ttm_bo_wait(&nvbo->bo, false, false, false);
50 spin_unlock(&nvbo->bo.lock);
51 return ret;
52}
53
39static void 54static void
40nouveau_bo_del_ttm(struct ttm_buffer_object *bo) 55nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
41{ 56{
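
The new nouveau_bo_sync_gpu() above encodes a simple rule: if the buffer's last fence was emitted by a different channel, wait for the buffer to go idle before this channel uses it; otherwise ordering is already guaranteed. A sketch of that cross-channel check, with struct buf/chan and buf_wait_idle() invented for the illustration:

#include <stddef.h>

struct chan { int id; };
struct fence { struct chan *owner; };
struct buf { struct fence *last_fence; };

static int buf_wait_idle(struct buf *b) { (void)b; return 0; /* pretend */ }

static int sync_for_channel(struct buf *b, struct chan *c)
{
	/* no outstanding work, or the work was queued on this very channel */
	if (!b->last_fence || b->last_fence->owner == c)
		return 0;

	/* otherwise stall until the other channel's work has drained */
	return buf_wait_idle(b);
}

int main(void)
{
	struct chan a = { 1 }, c = { 2 };
	struct fence f = { &a };
	struct buf b = { &f };
	return sync_for_channel(&b, &c);   /* cross-channel case: waits */
}
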
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 90fdcda332be..0480f064f2c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -426,18 +426,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
426 ***********************************/ 426 ***********************************/
427 427
428struct drm_ioctl_desc nouveau_ioctls[] = { 428struct drm_ioctl_desc nouveau_ioctls[] = {
429 DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), 429 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
430 DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 430 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
431 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), 431 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
432 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), 432 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
433 DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), 433 DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
434 DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), 434 DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
435 DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), 435 DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
436 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), 436 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
437 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), 437 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
438 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), 438 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
439 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), 439 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
440 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), 440 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
441}; 441};
442 442
443int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); 443int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index b1b22baf1428..fc737037f751 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -104,7 +104,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
104 int i; 104 int i;
105 105
106 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 106 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
107 struct nouveau_i2c_chan *i2c; 107 struct nouveau_i2c_chan *i2c = NULL;
108 struct nouveau_encoder *nv_encoder; 108 struct nouveau_encoder *nv_encoder;
109 struct drm_mode_object *obj; 109 struct drm_mode_object *obj;
110 int id; 110 int id;
@@ -117,7 +117,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
117 if (!obj) 117 if (!obj)
118 continue; 118 continue;
119 nv_encoder = nouveau_encoder(obj_to_encoder(obj)); 119 nv_encoder = nouveau_encoder(obj_to_encoder(obj));
120 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); 120
121 if (nv_encoder->dcb->i2c_index < 0xf)
122 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
121 123
122 if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) { 124 if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
123 *pnv_encoder = nv_encoder; 125 *pnv_encoder = nv_encoder;
@@ -166,7 +168,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
166} 168}
167 169
168static enum drm_connector_status 170static enum drm_connector_status
169nouveau_connector_detect(struct drm_connector *connector) 171nouveau_connector_detect(struct drm_connector *connector, bool force)
170{ 172{
171 struct drm_device *dev = connector->dev; 173 struct drm_device *dev = connector->dev;
172 struct nouveau_connector *nv_connector = nouveau_connector(connector); 174 struct nouveau_connector *nv_connector = nouveau_connector(connector);
@@ -244,7 +246,7 @@ detect_analog:
244} 246}
245 247
246static enum drm_connector_status 248static enum drm_connector_status
247nouveau_connector_detect_lvds(struct drm_connector *connector) 249nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
248{ 250{
249 struct drm_device *dev = connector->dev; 251 struct drm_device *dev = connector->dev;
250 struct drm_nouveau_private *dev_priv = dev->dev_private; 252 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -265,7 +267,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector)
265 267
266 /* Try retrieving EDID via DDC */ 268 /* Try retrieving EDID via DDC */
267 if (!dev_priv->vbios.fp_no_ddc) { 269 if (!dev_priv->vbios.fp_no_ddc) {
268 status = nouveau_connector_detect(connector); 270 status = nouveau_connector_detect(connector, force);
269 if (status == connector_status_connected) 271 if (status == connector_status_connected)
270 goto out; 272 goto out;
271 } 273 }
@@ -556,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector)
556 if (nv_encoder->dcb->type == OUTPUT_LVDS && 558 if (nv_encoder->dcb->type == OUTPUT_LVDS &&
557 (nv_encoder->dcb->lvdsconf.use_straps_for_mode || 559 (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
558 dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { 560 dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
559 nv_connector->native_mode = drm_mode_create(dev); 561 struct drm_display_mode mode;
560 nouveau_bios_fp_mode(dev, nv_connector->native_mode); 562
563 nouveau_bios_fp_mode(dev, &mode);
564 nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
561 } 565 }
562 566
563 /* Find the native mode if this is a digital panel, if we didn't 567 /* Find the native mode if this is a digital panel, if we didn't
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e424bf74d706..b1be617373b6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1165,6 +1165,7 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
1165extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); 1165extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
1166extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); 1166extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
1167extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); 1167extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
1168extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
1168 1169
1169/* nouveau_fence.c */ 1170/* nouveau_fence.c */
1170struct nouveau_fence; 1171struct nouveau_fence;
@@ -1388,6 +1389,15 @@ nv_two_reg_pll(struct drm_device *dev)
1388 return false; 1389 return false;
1389} 1390}
1390 1391
1392static inline bool
1393nv_match_device(struct drm_device *dev, unsigned device,
1394 unsigned sub_vendor, unsigned sub_device)
1395{
1396 return dev->pdev->device == device &&
1397 dev->pdev->subsystem_vendor == sub_vendor &&
1398 dev->pdev->subsystem_device == sub_device;
1399}
1400
1391#define NV_SW 0x0000506e 1401#define NV_SW 0x0000506e
1392#define NV_SW_DMA_SEMAPHORE 0x00000060 1402#define NV_SW_DMA_SEMAPHORE 0x00000060
1393#define NV_SW_SEMAPHORE_OFFSET 0x00000064 1403#define NV_SW_SEMAPHORE_OFFSET 0x00000064
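
The nv_match_device() helper added above collapses the repeated triple PCI-ID comparisons at the quirk sites into one call. A sketch of the same matching, reusing the Dell LVDS quirk IDs from the nouveau_bios.c hunk; the pci_ids struct is made up for the example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pci_ids { uint16_t device, sub_vendor, sub_device; };

static bool ids_match(const struct pci_ids *a, const struct pci_ids *b)
{
	return a->device == b->device &&
	       a->sub_vendor == b->sub_vendor &&
	       a->sub_device == b->sub_device;
}

int main(void)
{
	const struct pci_ids quirk      = { 0x01d7, 0x1028, 0x01c2 };
	const struct pci_ids this_board = { 0x01d7, 0x1028, 0x01c2 };

	if (ids_match(&this_board, &quirk))
		printf("apply duallink transition clock quirk\n");
	return 0;
}
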
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 6b208ffafa8d..87ac21ec23d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
64 struct nouveau_fence *fence; 64 struct nouveau_fence *fence;
65 uint32_t sequence; 65 uint32_t sequence;
66 66
67 spin_lock(&chan->fence.lock);
68
67 if (USE_REFCNT) 69 if (USE_REFCNT)
68 sequence = nvchan_rd32(chan, 0x48); 70 sequence = nvchan_rd32(chan, 0x48);
69 else 71 else
70 sequence = atomic_read(&chan->fence.last_sequence_irq); 72 sequence = atomic_read(&chan->fence.last_sequence_irq);
71 73
72 if (chan->fence.sequence_ack == sequence) 74 if (chan->fence.sequence_ack == sequence)
73 return; 75 goto out;
74 chan->fence.sequence_ack = sequence; 76 chan->fence.sequence_ack = sequence;
75 77
76 spin_lock(&chan->fence.lock);
77 list_for_each_safe(entry, tmp, &chan->fence.pending) { 78 list_for_each_safe(entry, tmp, &chan->fence.pending) {
78 fence = list_entry(entry, struct nouveau_fence, entry); 79 fence = list_entry(entry, struct nouveau_fence, entry);
79 80
@@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
85 if (sequence == chan->fence.sequence_ack) 86 if (sequence == chan->fence.sequence_ack)
86 break; 87 break;
87 } 88 }
89out:
88 spin_unlock(&chan->fence.lock); 90 spin_unlock(&chan->fence.lock);
89} 91}
90 92
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0f417ac1b696..19620a6709f5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
167 goto out; 167 goto out;
168 168
169 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); 169 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
170 /* drop reference from allocate - handle holds it now */
171 drm_gem_object_unreference_unlocked(nvbo->gem);
170out: 172out:
171 drm_gem_object_handle_unreference_unlocked(nvbo->gem);
172
173 if (ret)
174 drm_gem_object_unreference_unlocked(nvbo->gem);
175 return ret; 173 return ret;
176} 174}
177 175
@@ -245,7 +243,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
245 list_del(&nvbo->entry); 243 list_del(&nvbo->entry);
246 nvbo->reserved_by = NULL; 244 nvbo->reserved_by = NULL;
247 ttm_bo_unreserve(&nvbo->bo); 245 ttm_bo_unreserve(&nvbo->bo);
248 drm_gem_object_unreference(nvbo->gem); 246 drm_gem_object_unreference_unlocked(nvbo->gem);
249 } 247 }
250} 248}
251 249
@@ -300,7 +298,7 @@ retry:
300 validate_fini(op, NULL); 298 validate_fini(op, NULL);
301 if (ret == -EAGAIN) 299 if (ret == -EAGAIN)
302 ret = ttm_bo_wait_unreserved(&nvbo->bo, false); 300 ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
303 drm_gem_object_unreference(gem); 301 drm_gem_object_unreference_unlocked(gem);
304 if (ret) { 302 if (ret) {
305 NV_ERROR(dev, "fail reserve\n"); 303 NV_ERROR(dev, "fail reserve\n");
306 return ret; 304 return ret;
@@ -337,7 +335,9 @@ retry:
337 return -EINVAL; 335 return -EINVAL;
338 } 336 }
339 337
338 mutex_unlock(&drm_global_mutex);
340 ret = ttm_bo_wait_cpu(&nvbo->bo, false); 339 ret = ttm_bo_wait_cpu(&nvbo->bo, false);
340 mutex_lock(&drm_global_mutex);
341 if (ret) { 341 if (ret) {
342 NV_ERROR(dev, "fail wait_cpu\n"); 342 NV_ERROR(dev, "fail wait_cpu\n");
343 return ret; 343 return ret;
@@ -361,16 +361,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
361 361
362 list_for_each_entry(nvbo, list, entry) { 362 list_for_each_entry(nvbo, list, entry) {
363 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; 363 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
364 struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
365 364
366 if (prev_fence && nouveau_fence_channel(prev_fence) != chan) { 365 ret = nouveau_bo_sync_gpu(nvbo, chan);
367 spin_lock(&nvbo->bo.lock); 366 if (unlikely(ret)) {
368 ret = ttm_bo_wait(&nvbo->bo, false, false, false); 367 NV_ERROR(dev, "fail pre-validate sync\n");
369 spin_unlock(&nvbo->bo.lock); 368 return ret;
370 if (unlikely(ret)) {
371 NV_ERROR(dev, "fail wait other chan\n");
372 return ret;
373 }
374 } 369 }
375 370
376 ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, 371 ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
@@ -381,7 +376,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
381 return ret; 376 return ret;
382 } 377 }
383 378
384 nvbo->channel = chan; 379 nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
385 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, 380 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
386 false, false, false); 381 false, false, false);
387 nvbo->channel = NULL; 382 nvbo->channel = NULL;
@@ -390,6 +385,12 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
390 return ret; 385 return ret;
391 } 386 }
392 387
388 ret = nouveau_bo_sync_gpu(nvbo, chan);
389 if (unlikely(ret)) {
390 NV_ERROR(dev, "fail post-validate sync\n");
391 return ret;
392 }
393
393 if (nvbo->bo.offset == b->presumed.offset && 394 if (nvbo->bo.offset == b->presumed.offset &&
394 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && 395 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
395 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || 396 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -613,7 +614,20 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
613 return PTR_ERR(bo); 614 return PTR_ERR(bo);
614 } 615 }
615 616
616 mutex_lock(&dev->struct_mutex); 617 /* Mark push buffers as being used on PFIFO, the validation code
 618 * will then make sure that if the pushbuf bo moves, the move
 619 * happens on the kernel channel, which will in turn cause a sync
620 * to happen before we try and submit the push buffer.
621 */
622 for (i = 0; i < req->nr_push; i++) {
623 if (push[i].bo_index >= req->nr_buffers) {
624 NV_ERROR(dev, "push %d buffer not in list\n", i);
625 ret = -EINVAL;
626 goto out;
627 }
628
629 bo[push[i].bo_index].read_domains |= (1 << 31);
630 }
617 631
618 /* Validate buffer list */ 632 /* Validate buffer list */
619 ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, 633 ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
@@ -647,7 +661,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
647 push[i].length); 661 push[i].length);
648 } 662 }
649 } else 663 } else
650 if (dev_priv->card_type >= NV_20) { 664 if (dev_priv->chipset >= 0x25) {
651 ret = RING_SPACE(chan, req->nr_push * 2); 665 ret = RING_SPACE(chan, req->nr_push * 2);
652 if (ret) { 666 if (ret) {
653 NV_ERROR(dev, "cal_space: %d\n", ret); 667 NV_ERROR(dev, "cal_space: %d\n", ret);
@@ -713,7 +727,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
713out: 727out:
714 validate_fini(&op, fence); 728 validate_fini(&op, fence);
715 nouveau_fence_unref((void**)&fence); 729 nouveau_fence_unref((void**)&fence);
716 mutex_unlock(&dev->struct_mutex);
717 kfree(bo); 730 kfree(bo);
718 kfree(push); 731 kfree(push);
719 732
@@ -722,7 +735,7 @@ out_next:
722 req->suffix0 = 0x00000000; 735 req->suffix0 = 0x00000000;
723 req->suffix1 = 0x00000000; 736 req->suffix1 = 0x00000000;
724 } else 737 } else
725 if (dev_priv->card_type >= NV_20) { 738 if (dev_priv->chipset >= 0x25) {
726 req->suffix0 = 0x00020000; 739 req->suffix0 = 0x00020000;
727 req->suffix1 = 0x00000000; 740 req->suffix1 = 0x00000000;
728 } else { 741 } else {
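In nouveau_gem_ioctl_new() the reference handling is simplified per the new comment: the object is allocated with one reference, drm_gem_handle_create() takes its own, and the allocation reference is dropped unconditionally right afterwards, so the error path no longer needs a separate unreference. A toy refcount model of that pattern (hypothetical names, standalone C, not the GEM code):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
};

static struct obj *obj_alloc(void)		/* returns with one reference held */
{
	struct obj *o = calloc(1, sizeof(*o));
	if (o)
		o->refcount = 1;
	return o;
}

static void obj_unref(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);
}

static struct obj *handle_slot;			/* stand-in for the handle table */

static int handle_create(struct obj *o)
{
	o->refcount++;				/* the handle takes its own reference */
	handle_slot = o;
	return 0;
}

int main(void)
{
	struct obj *o = obj_alloc();
	int ret;

	if (!o)
		return 1;
	ret = handle_create(o);

	/* drop the allocation reference: the handle holds its own now, so the
	 * success and failure paths are identical from this point on */
	obj_unref(o);

	printf("ret=%d, refcount=%d\n", ret, handle_slot->refcount);
	return ret;
}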
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 0bd407ca3d42..84614858728b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -163,7 +163,7 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
163 if (entry->chan) 163 if (entry->chan)
164 return -EEXIST; 164 return -EEXIST;
165 165
166 if (dev_priv->card_type == NV_C0 && entry->read >= NV50_I2C_PORTS) { 166 if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
167 NV_ERROR(dev, "unknown i2c port %d\n", entry->read); 167 NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
168 return -EINVAL; 168 return -EINVAL;
169 } 169 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 491767fe4fcf..6b9187d7f67d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -214,6 +214,7 @@ int
214nouveau_sgdma_init(struct drm_device *dev) 214nouveau_sgdma_init(struct drm_device *dev)
215{ 215{
216 struct drm_nouveau_private *dev_priv = dev->dev_private; 216 struct drm_nouveau_private *dev_priv = dev->dev_private;
217 struct pci_dev *pdev = dev->pdev;
217 struct nouveau_gpuobj *gpuobj = NULL; 218 struct nouveau_gpuobj *gpuobj = NULL;
218 uint32_t aper_size, obj_size; 219 uint32_t aper_size, obj_size;
219 int i, ret; 220 int i, ret;
@@ -239,10 +240,19 @@ nouveau_sgdma_init(struct drm_device *dev)
239 240
240 dev_priv->gart_info.sg_dummy_page = 241 dev_priv->gart_info.sg_dummy_page =
241 alloc_page(GFP_KERNEL|__GFP_DMA32); 242 alloc_page(GFP_KERNEL|__GFP_DMA32);
243 if (!dev_priv->gart_info.sg_dummy_page) {
244 nouveau_gpuobj_del(dev, &gpuobj);
245 return -ENOMEM;
246 }
247
242 set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags); 248 set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
243 dev_priv->gart_info.sg_dummy_bus = 249 dev_priv->gart_info.sg_dummy_bus =
244 pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, 250 pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
245 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 251 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
252 if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
253 nouveau_gpuobj_del(dev, &gpuobj);
254 return -EFAULT;
255 }
246 256
247 if (dev_priv->card_type < NV_50) { 257 if (dev_priv->card_type < NV_50) {
248 /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and 258 /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
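The nouveau_sgdma_init() hunk adds the two failure checks that were missing around the dummy-page setup: alloc_page() can return NULL, and the address produced by pci_map_page() has to be validated with pci_dma_mapping_error() before it is used; in both cases the gpuobj created earlier is torn down with nouveau_gpuobj_del() before returning. A kernel-flavoured sketch of that ordering, compilable only in-kernel and with the cleanup reduced to freeing the page (the driver's real unwind is the gpuobj teardown shown above):

#include <linux/gfp.h>
#include <linux/pci.h>

static int setup_dummy_page(struct pci_dev *pdev, struct page **page,
			    dma_addr_t *bus)
{
	*page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!*page)
		return -ENOMEM;

	*bus = pci_map_page(pdev, *page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, *bus)) {
		__free_page(*page);
		*page = NULL;
		return -EFAULT;		/* same error code the patch uses */
	}
	return 0;
}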
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index a5dcf7685800..0d3206a7046c 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -444,6 +444,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
444 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 444 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
445 struct dcb_entry *dcbe = nv_encoder->dcb; 445 struct dcb_entry *dcbe = nv_encoder->dcb;
446 int head = nouveau_crtc(encoder->crtc)->index; 446 int head = nouveau_crtc(encoder->crtc)->index;
447 struct drm_encoder *slave_encoder;
447 448
448 if (dcbe->type == OUTPUT_TMDS) 449 if (dcbe->type == OUTPUT_TMDS)
449 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); 450 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
@@ -462,9 +463,10 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
462 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); 463 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
463 464
464 /* Init external transmitters */ 465 /* Init external transmitters */
465 if (get_tmds_slave(encoder)) 466 slave_encoder = get_tmds_slave(encoder);
466 get_slave_funcs(get_tmds_slave(encoder))->mode_set( 467 if (slave_encoder)
467 encoder, &nv_encoder->mode, &nv_encoder->mode); 468 get_slave_funcs(slave_encoder)->mode_set(
469 slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
468 470
469 helper->dpms(encoder, DRM_MODE_DPMS_ON); 471 helper->dpms(encoder, DRM_MODE_DPMS_ON);
470 472
@@ -473,6 +475,27 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
473 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 475 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
474} 476}
475 477
478static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
479{
480#ifdef __powerpc__
481 struct drm_device *dev = encoder->dev;
482
483 /* BIOS scripts usually take care of the backlight, thanks
484 * Apple for your consistency.
485 */
486 if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
487 dev->pci_device == 0x0329) {
488 if (mode == DRM_MODE_DPMS_ON) {
489 nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
490 nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1);
491 } else {
492 nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
493 nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0);
494 }
495 }
496#endif
497}
498
476static inline bool is_powersaving_dpms(int mode) 499static inline bool is_powersaving_dpms(int mode)
477{ 500{
478 return (mode != DRM_MODE_DPMS_ON); 501 return (mode != DRM_MODE_DPMS_ON);
@@ -520,6 +543,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
520 LVDS_PANEL_OFF, 0); 543 LVDS_PANEL_OFF, 0);
521 } 544 }
522 545
546 nv04_dfp_update_backlight(encoder, mode);
523 nv04_dfp_update_fp_control(encoder, mode); 547 nv04_dfp_update_fp_control(encoder, mode);
524 548
525 if (mode == DRM_MODE_DPMS_ON) 549 if (mode == DRM_MODE_DPMS_ON)
@@ -543,6 +567,7 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
543 NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n", 567 NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
544 mode, nv_encoder->dcb->index); 568 mode, nv_encoder->dcb->index);
545 569
570 nv04_dfp_update_backlight(encoder, mode);
546 nv04_dfp_update_fp_control(encoder, mode); 571 nv04_dfp_update_fp_control(encoder, mode);
547} 572}
548 573
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 44fefb0c7083..13cdc05b7c2d 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -121,10 +121,14 @@ static bool
121get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) 121get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
122{ 122{
123 /* Zotac FX5200 */ 123 /* Zotac FX5200 */
124 if (dev->pdev->device == 0x0322 && 124 if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) ||
125 dev->pdev->subsystem_vendor == 0x19da && 125 nv_match_device(dev, 0x0322, 0x19da, 0x2035)) {
126 (dev->pdev->subsystem_device == 0x1035 || 126 *pin_mask = 0xc;
127 dev->pdev->subsystem_device == 0x2035)) { 127 return false;
128 }
129
130 /* MSI nForce2 IGP */
131 if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) {
128 *pin_mask = 0xc; 132 *pin_mask = 0xc;
129 return false; 133 return false;
130 } 134 }
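The nv17_tv.c quirk table now goes through nv_match_device() instead of open-coding the PCI id comparisons. Judging by the checks it replaces, the helper compares the device id plus subsystem vendor/device in one call, presumably along these lines (a sketch, not the actual nouveau header definition):

#include "drmP.h"

static inline bool
nv_match_device(struct drm_device *dev, unsigned device,
		unsigned sub_vendor, unsigned sub_device)
{
	return dev->pdev->device == device &&
	       dev->pdev->subsystem_vendor == sub_vendor &&
	       dev->pdev->subsystem_device == sub_device;
}

With the helper, each quirk is a single (device, subvendor, subdevice) triple, which is what makes the new MSI nForce2 IGP entry a one-line condition.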
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 37c7b48ab24a..91ef93cf1f35 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -139,6 +139,8 @@ nv50_instmem_init(struct drm_device *dev)
139 chan->file_priv = (struct drm_file *)-2; 139 chan->file_priv = (struct drm_file *)-2;
140 dev_priv->fifos[0] = dev_priv->fifos[127] = chan; 140 dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
141 141
142 INIT_LIST_HEAD(&chan->ramht_refs);
143
142 /* Channel's PRAMIN object + heap */ 144 /* Channel's PRAMIN object + heap */
143 ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, 145 ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
144 NULL, &chan->ramin); 146 NULL, &chan->ramin);
@@ -278,7 +280,7 @@ nv50_instmem_init(struct drm_device *dev)
278 /*XXX: incorrect, but needed to make hash func "work" */ 280 /*XXX: incorrect, but needed to make hash func "work" */
279 dev_priv->ramht_offset = 0x10000; 281 dev_priv->ramht_offset = 0x10000;
280 dev_priv->ramht_bits = 9; 282 dev_priv->ramht_bits = 9;
281 dev_priv->ramht_size = (1 << dev_priv->ramht_bits); 283 dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
282 return 0; 284 return 0;
283} 285}
284 286
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 3ab3cdc42173..6b451f864783 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -142,14 +142,16 @@ int
142nvc0_instmem_suspend(struct drm_device *dev) 142nvc0_instmem_suspend(struct drm_device *dev)
143{ 143{
144 struct drm_nouveau_private *dev_priv = dev->dev_private; 144 struct drm_nouveau_private *dev_priv = dev->dev_private;
145 u32 *buf;
145 int i; 146 int i;
146 147
147 dev_priv->susres.ramin_copy = vmalloc(65536); 148 dev_priv->susres.ramin_copy = vmalloc(65536);
148 if (!dev_priv->susres.ramin_copy) 149 if (!dev_priv->susres.ramin_copy)
149 return -ENOMEM; 150 return -ENOMEM;
151 buf = dev_priv->susres.ramin_copy;
150 152
151 for (i = 0x700000; i < 0x710000; i += 4) 153 for (i = 0; i < 65536; i += 4)
152 dev_priv->susres.ramin_copy[i/4] = nv_rd32(dev, i); 154 buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
153 return 0; 155 return 0;
154} 156}
155 157
@@ -157,14 +159,15 @@ void
157nvc0_instmem_resume(struct drm_device *dev) 159nvc0_instmem_resume(struct drm_device *dev)
158{ 160{
159 struct drm_nouveau_private *dev_priv = dev->dev_private; 161 struct drm_nouveau_private *dev_priv = dev->dev_private;
162 u32 *buf = dev_priv->susres.ramin_copy;
160 u64 chan; 163 u64 chan;
161 int i; 164 int i;
162 165
163 chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; 166 chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
164 nv_wr32(dev, 0x001700, chan >> 16); 167 nv_wr32(dev, 0x001700, chan >> 16);
165 168
166 for (i = 0x700000; i < 0x710000; i += 4) 169 for (i = 0; i < 65536; i += 4)
167 nv_wr32(dev, i, dev_priv->susres.ramin_copy[i/4]); 170 nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
168 vfree(dev_priv->susres.ramin_copy); 171 vfree(dev_priv->susres.ramin_copy);
169 dev_priv->susres.ramin_copy = NULL; 172 dev_priv->susres.ramin_copy = NULL;
170 173
@@ -221,7 +224,7 @@ nvc0_instmem_init(struct drm_device *dev)
221 /*XXX: incorrect, but needed to make hash func "work" */ 224 /*XXX: incorrect, but needed to make hash func "work" */
222 dev_priv->ramht_offset = 0x10000; 225 dev_priv->ramht_offset = 0x10000;
223 dev_priv->ramht_bits = 9; 226 dev_priv->ramht_bits = 9;
224 dev_priv->ramht_size = (1 << dev_priv->ramht_bits); 227 dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
225 return 0; 228 return 0;
226} 229}
227 230
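Two separate fixes land in the instmem files. First, the nvc0 suspend/resume copy loops now iterate over offsets 0..65535 relative to NV04_PRAMIN (the 0x700000 aperture base) instead of over the absolute addresses 0x700000..0x70ffff: the register accesses are unchanged, but the old loop also used the absolute address to index the 65536-byte ramin_copy buffer, which only holds 16384 u32s, so every iteration landed far out of bounds. Second, ramht_size becomes (1 << ramht_bits) * 8, presumably the table size in bytes for 8-byte (two-word) hash entries rather than the bare entry count. A standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned buf_entries = 65536 / 4;		/* vmalloc(65536) holds 16384 u32s */
	unsigned old_max_idx = (0x710000 - 4) / 4;	/* last index the old loop used */
	unsigned new_max_idx = (65536 - 4) / 4;		/* last index after the fix */
	unsigned ramht_bits = 9;

	printf("buffer entries:      %u\n", buf_entries);
	printf("old loop max index:  %u (out of bounds)\n", old_max_idx);
	printf("new loop max index:  %u\n", new_max_idx);
	printf("ramht_size:          %u bytes (%u entries * 8)\n",
	       (1u << ramht_bits) * 8, 1u << ramht_bits);
	return 0;
}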
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 077af1f2f9b4..a9e33ce65918 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1639,30 +1639,29 @@ void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
1639 r128_do_cleanup_pageflip(dev); 1639 r128_do_cleanup_pageflip(dev);
1640 } 1640 }
1641} 1641}
1642
1643void r128_driver_lastclose(struct drm_device *dev) 1642void r128_driver_lastclose(struct drm_device *dev)
1644{ 1643{
1645 r128_do_cleanup_cce(dev); 1644 r128_do_cleanup_cce(dev);
1646} 1645}
1647 1646
1648struct drm_ioctl_desc r128_ioctls[] = { 1647struct drm_ioctl_desc r128_ioctls[] = {
1649 DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1648 DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1650 DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1649 DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1651 DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1650 DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1652 DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1651 DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1653 DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH), 1652 DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
1654 DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH), 1653 DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
1655 DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH), 1654 DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
1656 DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH), 1655 DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
1657 DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH), 1656 DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
1658 DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH), 1657 DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
1659 DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH), 1658 DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
1660 DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH), 1659 DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
1661 DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH), 1660 DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
1662 DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH), 1661 DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
1663 DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH), 1662 DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
1664 DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1663 DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1665 DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH), 1664 DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
1666}; 1665};
1667 1666
1668int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls); 1667int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 1bc72c3190a9..fe359a239df3 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS
4999#define SW_I2C_CNTL_WRITE1BIT 6 4999#define SW_I2C_CNTL_WRITE1BIT 6
5000 5000
5001//==============================VESA definition Portion=============================== 5001//==============================VESA definition Portion===============================
5002#define VESA_OEM_PRODUCT_REV '01.00' 5002#define VESA_OEM_PRODUCT_REV "01.00"
5003#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support 5003#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
5004#define VESA_MODE_WIN_ATTRIBUTE 7 5004#define VESA_MODE_WIN_ATTRIBUTE 7
5005#define VESA_WIN_SIZE 64 5005#define VESA_WIN_SIZE 64
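The atombios.h fix replaces the multi-character constant '01.00' with the string literal "01.00". In C the former is an int with an implementation-defined value (compilers typically only warn about it), not a revision string, so anything treating VESA_OEM_PRODUCT_REV as text needs the literal. A standalone illustration:

#include <stdio.h>

#define OLD_REV '01.00'	/* multi-character constant: an int, value implementation-defined */
#define NEW_REV "01.00"	/* string literal: six bytes including the terminating NUL */

int main(void)
{
	printf("old: int value 0x%x\n", (unsigned)OLD_REV);
	printf("new: \"%s\", sizeof %zu\n", NEW_REV, sizeof(NEW_REV));
	return 0;
}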
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 12ad512bd3d3..cd0290f946cf 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -332,6 +332,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
332 args.usV_SyncWidth = 332 args.usV_SyncWidth =
333 cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); 333 cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
334 334
335 args.ucOverscanRight = radeon_crtc->h_border;
336 args.ucOverscanLeft = radeon_crtc->h_border;
337 args.ucOverscanBottom = radeon_crtc->v_border;
338 args.ucOverscanTop = radeon_crtc->v_border;
339
335 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 340 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
336 misc |= ATOM_VSYNC_POLARITY; 341 misc |= ATOM_VSYNC_POLARITY;
337 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 342 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -471,6 +476,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
471 struct radeon_encoder *radeon_encoder = NULL; 476 struct radeon_encoder *radeon_encoder = NULL;
472 u32 adjusted_clock = mode->clock; 477 u32 adjusted_clock = mode->clock;
473 int encoder_mode = 0; 478 int encoder_mode = 0;
479 u32 dp_clock = mode->clock;
480 int bpc = 8;
474 481
475 /* reset the pll flags */ 482 /* reset the pll flags */
476 pll->flags = 0; 483 pll->flags = 0;
@@ -513,6 +520,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
513 if (encoder->crtc == crtc) { 520 if (encoder->crtc == crtc) {
514 radeon_encoder = to_radeon_encoder(encoder); 521 radeon_encoder = to_radeon_encoder(encoder);
515 encoder_mode = atombios_get_encoder_mode(encoder); 522 encoder_mode = atombios_get_encoder_mode(encoder);
523 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
524 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
525 if (connector) {
526 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
527 struct radeon_connector_atom_dig *dig_connector =
528 radeon_connector->con_priv;
529
530 dp_clock = dig_connector->dp_clock;
531 }
532 }
533
516 if (ASIC_IS_AVIVO(rdev)) { 534 if (ASIC_IS_AVIVO(rdev)) {
517 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 535 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
518 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) 536 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
@@ -521,6 +539,21 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
521 pll->algo = PLL_ALGO_LEGACY; 539 pll->algo = PLL_ALGO_LEGACY;
522 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; 540 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
523 } 541 }
542 /* There is some evidence (often anecdotal) that RV515/RV620 LVDS
543 * (on some boards at least) prefers the legacy algo. I'm not
544 * sure whether this should handled generically or on a
545 * case-by-case quirk basis. Both algos should work fine in the
546 * majority of cases.
547 */
548 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
549 ((rdev->family == CHIP_RV515) ||
550 (rdev->family == CHIP_RV620))) {
 551 /* allow the user to override just in case */
552 if (radeon_new_pll == 1)
553 pll->algo = PLL_ALGO_NEW;
554 else
555 pll->algo = PLL_ALGO_LEGACY;
556 }
524 } else { 557 } else {
525 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 558 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
526 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; 559 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -555,6 +588,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
555 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); 588 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
556 args.v1.ucTransmitterID = radeon_encoder->encoder_id; 589 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
557 args.v1.ucEncodeMode = encoder_mode; 590 args.v1.ucEncodeMode = encoder_mode;
591 if (encoder_mode == ATOM_ENCODER_MODE_DP) {
592 /* may want to enable SS on DP eventually */
593 /* args.v1.ucConfig |=
594 ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/
595 } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
596 args.v1.ucConfig |=
597 ADJUST_DISPLAY_CONFIG_SS_ENABLE;
598 }
558 599
559 atom_execute_table(rdev->mode_info.atom_context, 600 atom_execute_table(rdev->mode_info.atom_context,
560 index, (uint32_t *)&args); 601 index, (uint32_t *)&args);
@@ -568,10 +609,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
568 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 609 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
569 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 610 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
570 611
571 if (encoder_mode == ATOM_ENCODER_MODE_DP) 612 if (encoder_mode == ATOM_ENCODER_MODE_DP) {
613 /* may want to enable SS on DP/eDP eventually */
614 /*args.v3.sInput.ucDispPllConfig |=
615 DISPPLL_CONFIG_SS_ENABLE;*/
572 args.v3.sInput.ucDispPllConfig |= 616 args.v3.sInput.ucDispPllConfig |=
573 DISPPLL_CONFIG_COHERENT_MODE; 617 DISPPLL_CONFIG_COHERENT_MODE;
574 else { 618 /* 16200 or 27000 */
619 args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
620 } else {
621 if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
622 /* deep color support */
623 args.v3.sInput.usPixelClock =
624 cpu_to_le16((mode->clock * bpc / 8) / 10);
625 }
575 if (dig->coherent_mode) 626 if (dig->coherent_mode)
576 args.v3.sInput.ucDispPllConfig |= 627 args.v3.sInput.ucDispPllConfig |=
577 DISPPLL_CONFIG_COHERENT_MODE; 628 DISPPLL_CONFIG_COHERENT_MODE;
@@ -580,13 +631,19 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
580 DISPPLL_CONFIG_DUAL_LINK; 631 DISPPLL_CONFIG_DUAL_LINK;
581 } 632 }
582 } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 633 } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
583 /* may want to enable SS on DP/eDP eventually */ 634 if (encoder_mode == ATOM_ENCODER_MODE_DP) {
584 /*args.v3.sInput.ucDispPllConfig |= 635 /* may want to enable SS on DP/eDP eventually */
585 DISPPLL_CONFIG_SS_ENABLE;*/ 636 /*args.v3.sInput.ucDispPllConfig |=
586 if (encoder_mode == ATOM_ENCODER_MODE_DP) 637 DISPPLL_CONFIG_SS_ENABLE;*/
587 args.v3.sInput.ucDispPllConfig |= 638 args.v3.sInput.ucDispPllConfig |=
588 DISPPLL_CONFIG_COHERENT_MODE; 639 DISPPLL_CONFIG_COHERENT_MODE;
589 else { 640 /* 16200 or 27000 */
641 args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
642 } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
643 /* want to enable SS on LVDS eventually */
644 /*args.v3.sInput.ucDispPllConfig |=
645 DISPPLL_CONFIG_SS_ENABLE;*/
646 } else {
590 if (mode->clock > 165000) 647 if (mode->clock > 165000)
591 args.v3.sInput.ucDispPllConfig |= 648 args.v3.sInput.ucDispPllConfig |=
592 DISPPLL_CONFIG_DUAL_LINK; 649 DISPPLL_CONFIG_DUAL_LINK;
@@ -1019,11 +1076,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
1019 1076
1020 if (rdev->family >= CHIP_RV770) { 1077 if (rdev->family >= CHIP_RV770) {
1021 if (radeon_crtc->crtc_id) { 1078 if (radeon_crtc->crtc_id) {
1022 WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); 1079 WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1023 WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); 1080 WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1024 } else { 1081 } else {
1025 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); 1082 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1026 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); 1083 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1027 } 1084 }
1028 } 1085 }
1029 WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, 1086 WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -1160,8 +1217,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
1160 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1217 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1161 struct drm_device *dev = crtc->dev; 1218 struct drm_device *dev = crtc->dev;
1162 struct radeon_device *rdev = dev->dev_private; 1219 struct radeon_device *rdev = dev->dev_private;
1220 struct drm_encoder *encoder;
1221 bool is_tvcv = false;
1163 1222
1164 /* TODO color tiling */ 1223 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1224 /* find tv std */
1225 if (encoder->crtc == crtc) {
1226 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1227 if (radeon_encoder->active_device &
1228 (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
1229 is_tvcv = true;
1230 }
1231 }
1165 1232
1166 atombios_disable_ss(crtc); 1233 atombios_disable_ss(crtc);
1167 /* always set DCPLL */ 1234 /* always set DCPLL */
@@ -1170,9 +1237,14 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
1170 atombios_crtc_set_pll(crtc, adjusted_mode); 1237 atombios_crtc_set_pll(crtc, adjusted_mode);
1171 atombios_enable_ss(crtc); 1238 atombios_enable_ss(crtc);
1172 1239
1173 if (ASIC_IS_AVIVO(rdev)) 1240 if (ASIC_IS_DCE4(rdev))
1174 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 1241 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
1175 else { 1242 else if (ASIC_IS_AVIVO(rdev)) {
1243 if (is_tvcv)
1244 atombios_crtc_set_timing(crtc, adjusted_mode);
1245 else
1246 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
1247 } else {
1176 atombios_crtc_set_timing(crtc, adjusted_mode); 1248 atombios_crtc_set_timing(crtc, adjusted_mode);
1177 if (radeon_crtc->crtc_id == 0) 1249 if (radeon_crtc->crtc_id == 0)
1178 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 1250 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
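Among the atombios_crtc.c changes, avivo_crtc_set_base() stops hard-wiring the R700 *_SURFACE_ADDRESS_HIGH registers to zero and programs upper_32_bits(fb_location), so a scanout buffer placed above 4 GiB of MC address space gets its high dword. upper_32_bits()/lower_32_bits() are the kernel helpers from <linux/kernel.h>; they are reproduced below only so the split is visible in a standalone snippet (the address value is made up):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* as in the kernel: shift twice by 16 so 32-bit builds don't warn */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((u32)(n))

int main(void)
{
	u64 fb_location = 0x123456000ULL;	/* hypothetical MC address above 4 GiB */

	printf("SURFACE_ADDRESS_HIGH = 0x%08x\n", upper_32_bits(fb_location));
	printf("SURFACE_ADDRESS      = 0x%08x\n", lower_32_bits(fb_location));
	return 0;
}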
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 36e0d4b545e6..4e7778d44b8d 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -610,7 +610,7 @@ void dp_link_train(struct drm_encoder *encoder,
610 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; 610 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
611 else 611 else
612 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; 612 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
613 if (dig_connector->linkb) 613 if (dig->linkb)
614 enc_id |= ATOM_DP_CONFIG_LINK_B; 614 enc_id |= ATOM_DP_CONFIG_LINK_B;
615 else 615 else
616 enc_id |= ATOM_DP_CONFIG_LINK_A; 616 enc_id |= ATOM_DP_CONFIG_LINK_A;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 957d5067ad9c..2f93d46ae69a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -675,6 +675,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
675 return 0; 675 return 0;
676} 676}
677 677
678static int evergreen_cp_start(struct radeon_device *rdev)
679{
680 int r;
681 uint32_t cp_me;
682
683 r = radeon_ring_lock(rdev, 7);
684 if (r) {
685 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
686 return r;
687 }
688 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
689 radeon_ring_write(rdev, 0x1);
690 radeon_ring_write(rdev, 0x0);
691 radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
692 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
693 radeon_ring_write(rdev, 0);
694 radeon_ring_write(rdev, 0);
695 radeon_ring_unlock_commit(rdev);
696
697 cp_me = 0xff;
698 WREG32(CP_ME_CNTL, cp_me);
699
700 r = radeon_ring_lock(rdev, 4);
701 if (r) {
702 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
703 return r;
704 }
705 /* init some VGT regs */
706 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
707 radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
708 radeon_ring_write(rdev, 0xe);
709 radeon_ring_write(rdev, 0x10);
710 radeon_ring_unlock_commit(rdev);
711
712 return 0;
713}
714
678int evergreen_cp_resume(struct radeon_device *rdev) 715int evergreen_cp_resume(struct radeon_device *rdev)
679{ 716{
680 u32 tmp; 717 u32 tmp;
@@ -719,7 +756,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
719 rdev->cp.rptr = RREG32(CP_RB_RPTR); 756 rdev->cp.rptr = RREG32(CP_RB_RPTR);
720 rdev->cp.wptr = RREG32(CP_RB_WPTR); 757 rdev->cp.wptr = RREG32(CP_RB_WPTR);
721 758
722 r600_cp_start(rdev); 759 evergreen_cp_start(rdev);
723 rdev->cp.ready = true; 760 rdev->cp.ready = true;
724 r = radeon_ring_test(rdev); 761 r = radeon_ring_test(rdev);
725 if (r) { 762 if (r) {
@@ -1100,7 +1137,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1100 1137
1101 WREG32(RCU_IND_INDEX, 0x203); 1138 WREG32(RCU_IND_INDEX, 0x203);
1102 efuse_straps_3 = RREG32(RCU_IND_DATA); 1139 efuse_straps_3 = RREG32(RCU_IND_DATA);
1103 efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28; 1140 efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
1104 1141
1105 switch(efuse_box_bit_127_124) { 1142 switch(efuse_box_bit_127_124) {
1106 case 0x0: 1143 case 0x0:
@@ -1123,14 +1160,25 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1123 EVERGREEN_MAX_BACKENDS_MASK)); 1160 EVERGREEN_MAX_BACKENDS_MASK));
1124 break; 1161 break;
1125 } 1162 }
1126 } else 1163 } else {
1127 gb_backend_map = 1164 switch (rdev->family) {
1128 evergreen_get_tile_pipe_to_backend_map(rdev, 1165 case CHIP_CYPRESS:
1129 rdev->config.evergreen.max_tile_pipes, 1166 case CHIP_HEMLOCK:
1130 rdev->config.evergreen.max_backends, 1167 gb_backend_map = 0x66442200;
1131 ((EVERGREEN_MAX_BACKENDS_MASK << 1168 break;
1132 rdev->config.evergreen.max_backends) & 1169 case CHIP_JUNIPER:
1133 EVERGREEN_MAX_BACKENDS_MASK)); 1170 gb_backend_map = 0x00006420;
1171 break;
1172 default:
1173 gb_backend_map =
1174 evergreen_get_tile_pipe_to_backend_map(rdev,
1175 rdev->config.evergreen.max_tile_pipes,
1176 rdev->config.evergreen.max_backends,
1177 ((EVERGREEN_MAX_BACKENDS_MASK <<
1178 rdev->config.evergreen.max_backends) &
1179 EVERGREEN_MAX_BACKENDS_MASK));
1180 }
1181 }
1134 1182
1135 rdev->config.evergreen.tile_config = gb_addr_config; 1183 rdev->config.evergreen.tile_config = gb_addr_config;
1136 WREG32(GB_BACKEND_MAP, gb_backend_map); 1184 WREG32(GB_BACKEND_MAP, gb_backend_map);
@@ -1359,6 +1407,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
1359 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 1407 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
1360 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 1408 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
1361 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1409 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1410 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1362 r600_vram_gtt_location(rdev, &rdev->mc); 1411 r600_vram_gtt_location(rdev, &rdev->mc);
1363 radeon_update_bandwidth_info(rdev); 1412 radeon_update_bandwidth_info(rdev);
1364 1413
@@ -1472,7 +1521,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
1472{ 1521{
1473 u32 tmp; 1522 u32 tmp;
1474 1523
1475 WREG32(CP_INT_CNTL, 0); 1524 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
1476 WREG32(GRBM_INT_CNTL, 0); 1525 WREG32(GRBM_INT_CNTL, 0);
1477 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 1526 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
1478 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 1527 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2054,11 +2103,6 @@ int evergreen_resume(struct radeon_device *rdev)
2054 */ 2103 */
2055 /* post card */ 2104 /* post card */
2056 atom_asic_init(rdev->mode_info.atom_context); 2105 atom_asic_init(rdev->mode_info.atom_context);
2057 /* Initialize clocks */
2058 r = radeon_clocks_init(rdev);
2059 if (r) {
2060 return r;
2061 }
2062 2106
2063 r = evergreen_startup(rdev); 2107 r = evergreen_startup(rdev);
2064 if (r) { 2108 if (r) {
@@ -2164,9 +2208,6 @@ int evergreen_init(struct radeon_device *rdev)
2164 radeon_surface_init(rdev); 2208 radeon_surface_init(rdev);
2165 /* Initialize clocks */ 2209 /* Initialize clocks */
2166 radeon_get_clock_info(rdev->ddev); 2210 radeon_get_clock_info(rdev->ddev);
2167 r = radeon_clocks_init(rdev);
2168 if (r)
2169 return r;
2170 /* Fence driver */ 2211 /* Fence driver */
2171 r = radeon_fence_driver_init(rdev); 2212 r = radeon_fence_driver_init(rdev);
2172 if (r) 2213 if (r)
@@ -2236,7 +2277,6 @@ void evergreen_fini(struct radeon_device *rdev)
2236 evergreen_pcie_gart_fini(rdev); 2277 evergreen_pcie_gart_fini(rdev);
2237 radeon_gem_fini(rdev); 2278 radeon_gem_fini(rdev);
2238 radeon_fence_driver_fini(rdev); 2279 radeon_fence_driver_fini(rdev);
2239 radeon_clocks_fini(rdev);
2240 radeon_agp_fini(rdev); 2280 radeon_agp_fini(rdev);
2241 radeon_bo_fini(rdev); 2281 radeon_bo_fini(rdev);
2242 radeon_atombios_fini(rdev); 2282 radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e817a0bb5eb4..e59422320bb6 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1030,6 +1030,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1030 return r; 1030 return r;
1031 } 1031 }
1032 rdev->cp.ready = true; 1032 rdev->cp.ready = true;
1033 rdev->mc.active_vram_size = rdev->mc.real_vram_size;
1033 return 0; 1034 return 0;
1034} 1035}
1035 1036
@@ -1047,6 +1048,7 @@ void r100_cp_fini(struct radeon_device *rdev)
1047void r100_cp_disable(struct radeon_device *rdev) 1048void r100_cp_disable(struct radeon_device *rdev)
1048{ 1049{
1049 /* Disable ring */ 1050 /* Disable ring */
1051 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1050 rdev->cp.ready = false; 1052 rdev->cp.ready = false;
1051 WREG32(RADEON_CP_CSQ_MODE, 0); 1053 WREG32(RADEON_CP_CSQ_MODE, 0);
1052 WREG32(RADEON_CP_CSQ_CNTL, 0); 1054 WREG32(RADEON_CP_CSQ_CNTL, 0);
@@ -2020,18 +2022,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
2020 return false; 2022 return false;
2021 } 2023 }
2022 elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies); 2024 elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
2023 if (elapsed >= 3000) { 2025 if (elapsed >= 10000) {
2024 /* very likely the improbable case where current
2025 * rptr is equal to last recorded, a while ago, rptr
2026 * this is more likely a false positive update tracking
2027 * information which should force us to be recall at
2028 * latter point
2029 */
2030 lockup->last_cp_rptr = cp->rptr;
2031 lockup->last_jiffies = jiffies;
2032 return false;
2033 }
2034 if (elapsed >= 1000) {
2035 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); 2026 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
2036 return true; 2027 return true;
2037 } 2028 }
@@ -2306,6 +2297,7 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
2306 /* FIXME we don't use the second aperture yet when we could use it */ 2297 /* FIXME we don't use the second aperture yet when we could use it */
2307 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2298 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2308 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2299 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2300 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
2309 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2301 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2310 if (rdev->flags & RADEON_IS_IGP) { 2302 if (rdev->flags & RADEON_IS_IGP) {
2311 uint32_t tom; 2303 uint32_t tom;
@@ -3308,13 +3300,14 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3308 unsigned long size; 3300 unsigned long size;
3309 unsigned prim_walk; 3301 unsigned prim_walk;
3310 unsigned nverts; 3302 unsigned nverts;
3303 unsigned num_cb = track->num_cb;
3311 3304
3312 for (i = 0; i < track->num_cb; i++) { 3305 if (!track->zb_cb_clear && !track->color_channel_mask &&
3306 !track->blend_read_enable)
3307 num_cb = 0;
3308
3309 for (i = 0; i < num_cb; i++) {
3313 if (track->cb[i].robj == NULL) { 3310 if (track->cb[i].robj == NULL) {
3314 if (!(track->zb_cb_clear || track->color_channel_mask ||
3315 track->blend_read_enable)) {
3316 continue;
3317 }
3318 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 3311 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
3319 return -EINVAL; 3312 return -EINVAL;
3320 } 3313 }
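The r100.c lockup check drops the old two-stage heuristic (re-arm silently between 1 and 3 seconds of stall, report only after that) in favour of a single rule: if the CP read pointer has not advanced for 10 seconds, report a lockup. A userspace sketch of the simplified logic, assuming the caller re-arms the tracking state whenever rptr moves (that branch sits outside the hunk shown above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct lockup_track {
	uint32_t last_rptr;
	uint64_t last_ms;
};

static bool cp_is_lockup(struct lockup_track *t, uint32_t rptr, uint64_t now_ms)
{
	if (rptr != t->last_rptr) {
		/* the ring is making progress: re-arm and report no lockup */
		t->last_rptr = rptr;
		t->last_ms = now_ms;
		return false;
	}
	if (now_ms - t->last_ms >= 10000) {
		fprintf(stderr, "GPU lockup: CP stalled for %llu ms\n",
			(unsigned long long)(now_ms - t->last_ms));
		return true;
	}
	return false;
}

int main(void)
{
	struct lockup_track t = { 0, 0 };

	printf("%d\n", cp_is_lockup(&t, 0, 5000));	/* stalled 5 s: not yet */
	printf("%d\n", cp_is_lockup(&t, 0, 12000));	/* stalled 12 s: lockup */
	printf("%d\n", cp_is_lockup(&t, 4, 13000));	/* rptr moved: re-armed */
	return 0;
}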
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index d0ebae9dde25..7b65e4efe8af 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1248,6 +1248,7 @@ int r600_mc_init(struct radeon_device *rdev)
1248 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1248 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1249 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1249 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1250 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1250 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1251 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1251 r600_vram_gtt_location(rdev, &rdev->mc); 1252 r600_vram_gtt_location(rdev, &rdev->mc);
1252 1253
1253 if (rdev->flags & RADEON_IS_IGP) { 1254 if (rdev->flags & RADEON_IS_IGP) {
@@ -1917,6 +1918,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1917 */ 1918 */
1918void r600_cp_stop(struct radeon_device *rdev) 1919void r600_cp_stop(struct radeon_device *rdev)
1919{ 1920{
1921 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1920 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1922 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1921} 1923}
1922 1924
@@ -2119,10 +2121,7 @@ int r600_cp_start(struct radeon_device *rdev)
2119 } 2121 }
2120 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); 2122 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
2121 radeon_ring_write(rdev, 0x1); 2123 radeon_ring_write(rdev, 0x1);
2122 if (rdev->family >= CHIP_CEDAR) { 2124 if (rdev->family >= CHIP_RV770) {
2123 radeon_ring_write(rdev, 0x0);
2124 radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
2125 } else if (rdev->family >= CHIP_RV770) {
2126 radeon_ring_write(rdev, 0x0); 2125 radeon_ring_write(rdev, 0x0);
2127 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); 2126 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
2128 } else { 2127 } else {
@@ -2489,11 +2488,6 @@ int r600_resume(struct radeon_device *rdev)
2489 */ 2488 */
2490 /* post card */ 2489 /* post card */
2491 atom_asic_init(rdev->mode_info.atom_context); 2490 atom_asic_init(rdev->mode_info.atom_context);
2492 /* Initialize clocks */
2493 r = radeon_clocks_init(rdev);
2494 if (r) {
2495 return r;
2496 }
2497 2491
2498 r = r600_startup(rdev); 2492 r = r600_startup(rdev);
2499 if (r) { 2493 if (r) {
@@ -2586,9 +2580,6 @@ int r600_init(struct radeon_device *rdev)
2586 radeon_surface_init(rdev); 2580 radeon_surface_init(rdev);
2587 /* Initialize clocks */ 2581 /* Initialize clocks */
2588 radeon_get_clock_info(rdev->ddev); 2582 radeon_get_clock_info(rdev->ddev);
2589 r = radeon_clocks_init(rdev);
2590 if (r)
2591 return r;
2592 /* Fence driver */ 2583 /* Fence driver */
2593 r = radeon_fence_driver_init(rdev); 2584 r = radeon_fence_driver_init(rdev);
2594 if (r) 2585 if (r)
@@ -2663,7 +2654,6 @@ void r600_fini(struct radeon_device *rdev)
2663 radeon_agp_fini(rdev); 2654 radeon_agp_fini(rdev);
2664 radeon_gem_fini(rdev); 2655 radeon_gem_fini(rdev);
2665 radeon_fence_driver_fini(rdev); 2656 radeon_fence_driver_fini(rdev);
2666 radeon_clocks_fini(rdev);
2667 radeon_bo_fini(rdev); 2657 radeon_bo_fini(rdev);
2668 radeon_atombios_fini(rdev); 2658 radeon_atombios_fini(rdev);
2669 kfree(rdev->bios); 2659 kfree(rdev->bios);
@@ -2741,7 +2731,7 @@ int r600_ib_test(struct radeon_device *rdev)
2741 if (i < rdev->usec_timeout) { 2731 if (i < rdev->usec_timeout) {
2742 DRM_INFO("ib test succeeded in %u usecs\n", i); 2732 DRM_INFO("ib test succeeded in %u usecs\n", i);
2743 } else { 2733 } else {
2744 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", 2734 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2745 scratch, tmp); 2735 scratch, tmp);
2746 r = -EINVAL; 2736 r = -EINVAL;
2747 } 2737 }
@@ -2922,7 +2912,7 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
2922{ 2912{
2923 u32 tmp; 2913 u32 tmp;
2924 2914
2925 WREG32(CP_INT_CNTL, 0); 2915 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2926 WREG32(GRBM_INT_CNTL, 0); 2916 WREG32(GRBM_INT_CNTL, 0);
2927 WREG32(DxMODE_INT_MASK, 0); 2917 WREG32(DxMODE_INT_MASK, 0);
2928 if (ASIC_IS_DCE3(rdev)) { 2918 if (ASIC_IS_DCE3(rdev)) {
@@ -3540,8 +3530,9 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3540 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 3530 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
3541 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 3531 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
3542 */ 3532 */
3543 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { 3533 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
3544 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; 3534 rdev->vram_scratch.ptr) {
3535 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3545 u32 tmp; 3536 u32 tmp;
3546 3537
3547 WREG32(HDP_DEBUG1, 0); 3538 WREG32(HDP_DEBUG1, 0);
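r600_ioctl_wait_idle() keeps the r7xx HDP flush workaround (write HDP_DEBUG1, then perform a framebuffer read so the flush actually takes effect) but now takes the read-back address from the new dedicated VRAM scratch page instead of the GART table mapping, and skips the workaround entirely if that scratch page was never allocated. In kernel-flavoured shorthand the guarded sequence looks roughly like this; the readl() step is inferred from the "followed by fb read" comment rather than shown in the hunk above:

	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;

		WREG32(HDP_DEBUG1, 0);		/* r7xx erratum: poke HDP_DEBUG1 ... */
		(void)readl(ptr);		/* ... then read VRAM to flush HDP */
	}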
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index d13622ae74e9..3473c00781ff 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -1,3 +1,28 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2009 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 */
25
1#include "drmP.h" 26#include "drmP.h"
2#include "drm.h" 27#include "drm.h"
3#include "radeon_drm.h" 28#include "radeon_drm.h"
@@ -507,6 +532,7 @@ int r600_blit_init(struct radeon_device *rdev)
507 memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); 532 memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
508 radeon_bo_kunmap(rdev->r600_blit.shader_obj); 533 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
509 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 534 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
535 rdev->mc.active_vram_size = rdev->mc.real_vram_size;
510 return 0; 536 return 0;
511} 537}
512 538
@@ -514,6 +540,7 @@ void r600_blit_fini(struct radeon_device *rdev)
514{ 540{
515 int r; 541 int r;
516 542
543 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
517 if (rdev->r600_blit.shader_obj == NULL) 544 if (rdev->r600_blit.shader_obj == NULL)
518 return; 545 return;
519 /* If we can't reserve the bo, unref should be enough to destroy 546 /* If we can't reserve the bo, unref should be enough to destroy
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index fdc3b378cbb0..f437d36dd98c 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -1,3 +1,27 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2009 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 */
1 25
2#ifndef R600_BLIT_SHADERS_H 26#ifndef R600_BLIT_SHADERS_H
3#define R600_BLIT_SHADERS_H 27#define R600_BLIT_SHADERS_H
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index d8864949e387..250a3a918193 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1170,9 +1170,8 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
1170 /* using get ib will give us the offset into the mipmap bo */ 1170 /* using get ib will give us the offset into the mipmap bo */
1171 word0 = radeon_get_ib_value(p, idx + 3) << 8; 1171 word0 = radeon_get_ib_value(p, idx + 3) << 8;
1172 if ((mipmap_size + word0) > radeon_bo_size(mipmap)) { 1172 if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
1173 dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", 1173 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1174 w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture)); 1174 w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
1175 return -EINVAL;
1176 } 1175 }
1177 return 0; 1176 return 0;
1178} 1177}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3dfcfa3ca425..9ff38c99a6ea 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -344,6 +344,7 @@ struct radeon_mc {
344 * about vram size near mc fb location */ 344 * about vram size near mc fb location */
345 u64 mc_vram_size; 345 u64 mc_vram_size;
346 u64 visible_vram_size; 346 u64 visible_vram_size;
347 u64 active_vram_size;
347 u64 gtt_size; 348 u64 gtt_size;
348 u64 gtt_start; 349 u64 gtt_start;
349 u64 gtt_end; 350 u64 gtt_end;
@@ -1013,6 +1014,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
1013int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, 1014int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
1014 struct drm_file *filp); 1015 struct drm_file *filp);
1015 1016
1017/* VRAM scratch page for HDP bug */
1018struct r700_vram_scratch {
1019 struct radeon_bo *robj;
1020 volatile uint32_t *ptr;
1021};
1016 1022
1017/* 1023/*
1018 * Core structure, functions and helpers. 1024 * Core structure, functions and helpers.
@@ -1079,6 +1085,7 @@ struct radeon_device {
1079 const struct firmware *pfp_fw; /* r6/700 PFP firmware */ 1085 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
1080 const struct firmware *rlc_fw; /* r6/700 RLC firmware */ 1086 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
1081 struct r600_blit r600_blit; 1087 struct r600_blit r600_blit;
1088 struct r700_vram_scratch vram_scratch;
1082 int msi_enabled; /* msi enabled */ 1089 int msi_enabled; /* msi enabled */
1083 struct r600_ih ih; /* r6/700 interrupt ring */ 1090 struct r600_ih ih; /* r6/700 interrupt ring */
1084 struct workqueue_struct *wq; 1091 struct workqueue_struct *wq;
@@ -1333,8 +1340,6 @@ extern bool radeon_card_posted(struct radeon_device *rdev);
1333extern void radeon_update_bandwidth_info(struct radeon_device *rdev); 1340extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
1334extern void radeon_update_display_priority(struct radeon_device *rdev); 1341extern void radeon_update_display_priority(struct radeon_device *rdev);
1335extern bool radeon_boot_test_post_card(struct radeon_device *rdev); 1342extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
1336extern int radeon_clocks_init(struct radeon_device *rdev);
1337extern void radeon_clocks_fini(struct radeon_device *rdev);
1338extern void radeon_scratch_init(struct radeon_device *rdev); 1343extern void radeon_scratch_init(struct radeon_device *rdev);
1339extern void radeon_surface_init(struct radeon_device *rdev); 1344extern void radeon_surface_init(struct radeon_device *rdev);
1340extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); 1345extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index f40dfb77f9b1..bd2f33e5c91a 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -156,7 +156,13 @@ int radeon_agp_init(struct radeon_device *rdev)
156 } 156 }
157 157
158 mode.mode = info.mode; 158 mode.mode = info.mode;
159 agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; 159 /* chips with the agp to pcie bridge don't have the AGP_STATUS register
160 * Just use the whatever mode the host sets up.
161 */
162 if (rdev->family <= CHIP_RV350)
163 agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
164 else
165 agp_status = mode.mode;
160 is_v3 = !!(agp_status & RADEON_AGPv3_MODE); 166 is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
161 167
162 if (is_v3) { 168 if (is_v3) {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 646f96f97c77..25e1dd197791 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -733,6 +733,7 @@ static struct radeon_asic evergreen_asic = {
733 .set_engine_clock = &radeon_atom_set_engine_clock, 733 .set_engine_clock = &radeon_atom_set_engine_clock,
734 .get_memory_clock = &radeon_atom_get_memory_clock, 734 .get_memory_clock = &radeon_atom_get_memory_clock,
735 .set_memory_clock = &radeon_atom_set_memory_clock, 735 .set_memory_clock = &radeon_atom_set_memory_clock,
736 .get_pcie_lanes = NULL,
736 .set_pcie_lanes = NULL, 737 .set_pcie_lanes = NULL,
737 .set_clock_gating = NULL, 738 .set_clock_gating = NULL,
738 .set_surface_reg = r600_set_surface_reg, 739 .set_surface_reg = r600_set_surface_reg,
@@ -857,21 +858,3 @@ int radeon_asic_init(struct radeon_device *rdev)
857 return 0; 858 return 0;
858} 859}
859 860
860/*
861 * Wrapper around modesetting bits. Move to radeon_clocks.c?
862 */
863int radeon_clocks_init(struct radeon_device *rdev)
864{
865 int r;
866
867 r = radeon_static_clocks_init(rdev->ddev);
868 if (r) {
869 return r;
870 }
871 DRM_INFO("Clocks initialized !\n");
872 return 0;
873}
874
875void radeon_clocks_fini(struct radeon_device *rdev)
876{
877}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6d30868744ee..8e43ddae70cc 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -32,11 +32,11 @@
 
 /* from radeon_encoder.c */
 extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
 		      uint8_t dac);
 extern void radeon_link_encoder_connector(struct drm_device *dev);
 extern void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
 			uint32_t supported_device);
 
 /* from radeon_connector.c */
@@ -46,14 +46,14 @@ radeon_add_atom_connector(struct drm_device *dev,
 			  uint32_t supported_device,
 			  int connector_type,
 			  struct radeon_i2c_bus_rec *i2c_bus,
-			  bool linkb, uint32_t igp_lane_info,
+			  uint32_t igp_lane_info,
 			  uint16_t connector_object_id,
 			  struct radeon_hpd *hpd,
 			  struct radeon_router *router);
 
 /* from radeon_legacy_encoder.c */
 extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
 			  uint32_t supported_device);
 
 union atom_supported_devices {
@@ -85,6 +85,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
 	for (i = 0; i < num_indices; i++) {
 		gpio = &i2c_info->asGPIO_Info[i];
 
+		/* some evergreen boards have bad data for this entry */
+		if (ASIC_IS_DCE4(rdev)) {
+			if ((i == 7) &&
+			    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+			    (gpio->sucI2cId.ucAccess == 0)) {
+				gpio->sucI2cId.ucAccess = 0x97;
+				gpio->ucDataMaskShift = 8;
+				gpio->ucDataEnShift = 8;
+				gpio->ucDataY_Shift = 8;
+				gpio->ucDataA_Shift = 8;
+			}
+		}
+
 		if (gpio->sucI2cId.ucAccess == id) {
 			i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
 			i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -147,6 +160,20 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
 	for (i = 0; i < num_indices; i++) {
 		gpio = &i2c_info->asGPIO_Info[i];
 		i2c.valid = false;
+
+		/* some evergreen boards have bad data for this entry */
+		if (ASIC_IS_DCE4(rdev)) {
+			if ((i == 7) &&
+			    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+			    (gpio->sucI2cId.ucAccess == 0)) {
+				gpio->sucI2cId.ucAccess = 0x97;
+				gpio->ucDataMaskShift = 8;
+				gpio->ucDataEnShift = 8;
+				gpio->ucDataY_Shift = 8;
+				gpio->ucDataA_Shift = 8;
+			}
+		}
+
 		i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
 		i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
 		i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
@@ -226,6 +253,8 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
 	struct radeon_hpd hpd;
 	u32 reg;
 
+	memset(&hpd, 0, sizeof(struct radeon_hpd));
+
 	if (ASIC_IS_DCE4(rdev))
 		reg = EVERGREEN_DC_GPIO_HPD_A;
 	else
@@ -288,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		*connector_type = DRM_MODE_CONNECTOR_DVID;
 	}
 
+	/* MSI K9A2GM V2/V3 board has no HDMI or DVI */
+	if ((dev->pdev->device == 0x796e) &&
+	    (dev->pdev->subsystem_vendor == 0x1462) &&
+	    (dev->pdev->subsystem_device == 0x7302)) {
+		if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			return false;
+	}
+
 	/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
 	if ((dev->pdev->device == 0x7941) &&
 	    (dev->pdev->subsystem_vendor == 0x147b) &&
@@ -477,7 +515,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 	int i, j, k, path_size, device_support;
 	int connector_type;
 	u16 igp_lane_info, conn_id, connector_object_id;
-	bool linkb;
 	struct radeon_i2c_bus_rec ddc_bus;
 	struct radeon_router router;
 	struct radeon_gpio_rec gpio;
@@ -510,7 +547,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 		addr += path_size;
 		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
 		path_size += le16_to_cpu(path->usSize);
-		linkb = false;
+
 		if (device_support & le16_to_cpu(path->usDeviceTag)) {
 			uint8_t con_obj_id, con_obj_num, con_obj_type;
 
@@ -601,13 +638,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
 				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
-					if (grph_obj_num == 2)
-						linkb = true;
-					else
-						linkb = false;
+					u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
 
 					radeon_add_atom_encoder(dev,
-								grph_obj_id,
+								encoder_obj,
 								le16_to_cpu
 								(path->
 								usDeviceTag));
@@ -744,7 +778,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 					    le16_to_cpu(path->
 							usDeviceTag),
 					    connector_type, &ddc_bus,
-					    linkb, igp_lane_info,
+					    igp_lane_info,
 					    connector_object_id,
 					    &hpd,
 					    &router);
@@ -933,13 +967,13 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 		if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
 			radeon_add_atom_encoder(dev,
-						radeon_get_encoder_id(dev,
+						radeon_get_encoder_enum(dev,
 								      (1 << i),
 								      dac),
 						(1 << i));
 		else
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									(1 << i),
 									dac),
 						  (1 << i));
@@ -996,7 +1030,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 					    bios_connectors[i].
 					    connector_type,
 					    &bios_connectors[i].ddc_bus,
-					    false, 0,
+					    0,
 					    connector_object_id,
 					    &bios_connectors[i].hpd,
 					    &router);
@@ -1183,7 +1217,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
 			return true;
 		break;
 	case 2:
-		if (igp_info->info_2.ucMemoryType & 0x0f)
+		if (igp_info->info_2.ulBootUpSidePortClock)
 			return true;
 		break;
 	default:
@@ -1305,6 +1339,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
 	union lvds_info *lvds_info;
 	uint8_t frev, crev;
 	struct radeon_encoder_atom_dig *lvds = NULL;
+	int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
 
 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
 				   &frev, &crev, &data_offset)) {
@@ -1368,6 +1403,12 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
 		}
 
 		encoder->native_mode = lvds->native_mode;
+
+		if (encoder_enum == 2)
+			lvds->linkb = true;
+		else
+			lvds->linkb = false;
+
 	}
 	return lvds;
 }
@@ -1517,39 +1558,39 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev)
 		switch (tv_info->ucTV_BootUpDefaultStandard) {
 		case ATOM_TV_NTSC:
 			tv_std = TV_STD_NTSC;
-			DRM_INFO("Default TV standard: NTSC\n");
+			DRM_DEBUG_KMS("Default TV standard: NTSC\n");
 			break;
 		case ATOM_TV_NTSCJ:
 			tv_std = TV_STD_NTSC_J;
-			DRM_INFO("Default TV standard: NTSC-J\n");
+			DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
 			break;
 		case ATOM_TV_PAL:
 			tv_std = TV_STD_PAL;
-			DRM_INFO("Default TV standard: PAL\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL\n");
 			break;
 		case ATOM_TV_PALM:
 			tv_std = TV_STD_PAL_M;
-			DRM_INFO("Default TV standard: PAL-M\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
 			break;
 		case ATOM_TV_PALN:
 			tv_std = TV_STD_PAL_N;
-			DRM_INFO("Default TV standard: PAL-N\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL-N\n");
 			break;
 		case ATOM_TV_PALCN:
 			tv_std = TV_STD_PAL_CN;
-			DRM_INFO("Default TV standard: PAL-CN\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL-CN\n");
 			break;
 		case ATOM_TV_PAL60:
 			tv_std = TV_STD_PAL_60;
-			DRM_INFO("Default TV standard: PAL-60\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
 			break;
 		case ATOM_TV_SECAM:
 			tv_std = TV_STD_SECAM;
-			DRM_INFO("Default TV standard: SECAM\n");
+			DRM_DEBUG_KMS("Default TV standard: SECAM\n");
 			break;
 		default:
 			tv_std = TV_STD_NTSC;
-			DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
+			DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
 			break;
 		}
 	}
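
The two radeon_atombios.c hunks above install the same guard in both i2c table walkers: on DCE4 (Evergreen) boards, entry 7 of the VBIOS GPIO table can report an access id of 0 together with a clock-mask register index of 0x1936, and the patch rewrites that entry to access id 0x97 with the data shifts forced to 8. A small stand-alone sketch of that sanitising step follows; the struct name and the user-space framing are illustrative only, while the field names and constants come from the hunks.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Stand-in for the ATOM GPIO table entry touched by the hunks above. */
struct atom_gpio_i2c_entry {
	uint16_t usClkMaskRegisterIndex;
	struct { uint8_t ucAccess; } sucI2cId;
	uint8_t ucDataMaskShift, ucDataEnShift, ucDataY_Shift, ucDataA_Shift;
};

/* Same fix-up as the patch: entry 7 on DCE4 boards with bad VBIOS data. */
static void sanitize_gpio_entry(bool is_dce4, int i,
				struct atom_gpio_i2c_entry *gpio)
{
	if (is_dce4 && i == 7 &&
	    gpio->usClkMaskRegisterIndex == 0x1936 &&
	    gpio->sucI2cId.ucAccess == 0) {
		gpio->sucI2cId.ucAccess = 0x97;
		gpio->ucDataMaskShift = 8;
		gpio->ucDataEnShift = 8;
		gpio->ucDataY_Shift = 8;
		gpio->ucDataA_Shift = 8;
	}
}

int main(void)
{
	struct atom_gpio_i2c_entry gpio = { .usClkMaskRegisterIndex = 0x1936 };

	sanitize_gpio_entry(true, 7, &gpio);
	printf("access id after fixup: 0x%02x\n", gpio.sucI2cId.ucAccess);
	return 0;
}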
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 14448a740ba6..5249af8931e6 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -327,6 +327,14 @@ void radeon_get_clock_info(struct drm_device *dev)
 	mpll->max_feedback_div = 0xff;
 	mpll->best_vco = 0;
 
+	if (!rdev->clock.default_sclk)
+		rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
+	if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
+		rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
+
+	rdev->pm.current_sclk = rdev->clock.default_sclk;
+	rdev->pm.current_mclk = rdev->clock.default_mclk;
+
 }
 
 /* 10 khz */
@@ -897,53 +905,3 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
 	}
 }
 
-static void radeon_apply_clock_quirks(struct radeon_device *rdev)
-{
-	uint32_t tmp;
-
-	/* XXX make sure engine is idle */
-
-	if (rdev->family < CHIP_RS600) {
-		tmp = RREG32_PLL(RADEON_SCLK_CNTL);
-		if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
-			tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
-		if ((rdev->family == CHIP_RV250)
-		    || (rdev->family == CHIP_RV280))
-			tmp |=
-			    RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
-		if ((rdev->family == CHIP_RV350)
-		    || (rdev->family == CHIP_RV380))
-			tmp |= R300_SCLK_FORCE_VAP;
-		if (rdev->family == CHIP_R420)
-			tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
-		WREG32_PLL(RADEON_SCLK_CNTL, tmp);
-	} else if (rdev->family < CHIP_R600) {
-		tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
-		tmp |= AVIVO_CP_FORCEON;
-		WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
-
-		tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
-		tmp |= AVIVO_E2_FORCEON;
-		WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
-
-		tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
-		tmp |= AVIVO_IDCT_FORCEON;
-		WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
-	}
-}
-
-int radeon_static_clocks_init(struct drm_device *dev)
-{
-	struct radeon_device *rdev = dev->dev_private;
-
-	/* XXX make sure engine is idle */
-
-	if (radeon_dynclks != -1) {
-		if (radeon_dynclks) {
-			if (rdev->asic->set_clock_gating)
-				radeon_set_clock_gating(rdev, 1);
-		}
-	}
-	radeon_apply_clock_quirks(rdev);
-	return 0;
-}
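
The radeon_get_clock_info() hunk above seeds default_sclk/default_mclk from whatever the hardware is currently running when the BIOS tables hand back zero, and records the result in rdev->pm.current_sclk/current_mclk for the bandwidth code changed further down in radeon_device.c. The rdev->asic->get_memory_clock test matters because that callback can be NULL on some ASICs, just as the evergreen table above leaves .get_pcie_lanes NULL. A simplified stand-alone sketch of the fallback, with invented stand-in types and a dummy hardware getter:

#include <stdio.h>
#include <stdint.h>

/* Invented stand-ins; field names follow the hunk above. */
struct asic_ops {
	uint32_t (*get_engine_clock)(void);
	uint32_t (*get_memory_clock)(void);	/* may be NULL on some ASICs */
};

struct clocks { uint32_t default_sclk, default_mclk; };
struct pm_state { uint32_t current_sclk, current_mclk; };

static uint32_t hw_engine_clock(void) { return 72500; }	/* 10 kHz units */

/* Fall back to the clocks the hardware is running, then remember them
 * as the current clocks for later bandwidth calculations. */
static void seed_default_clocks(struct clocks *clk, struct pm_state *pm,
				const struct asic_ops *asic)
{
	if (!clk->default_sclk)
		clk->default_sclk = asic->get_engine_clock();
	if (!clk->default_mclk && asic->get_memory_clock)
		clk->default_mclk = asic->get_memory_clock();

	pm->current_sclk = clk->default_sclk;
	pm->current_mclk = clk->default_mclk;
}

int main(void)
{
	struct asic_ops asic = { .get_engine_clock = hw_engine_clock };
	struct clocks clk = { 0, 0 };
	struct pm_state pm;

	seed_default_clocks(&clk, &pm, &asic);
	printf("sclk %u, mclk %u\n", pm.current_sclk, pm.current_mclk);
	return 0;
}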
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 885dcfac1838..7b7ea269549c 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -39,8 +39,8 @@
 
 /* from radeon_encoder.c */
 extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
 		      uint8_t dac);
 extern void radeon_link_encoder_connector(struct drm_device *dev);
 
@@ -55,7 +55,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
 
 /* from radeon_legacy_encoder.c */
 extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
 			  uint32_t supported_device);
 
 /* old legacy ATI BIOS routines */
@@ -913,47 +913,47 @@ radeon_combios_get_tv_info(struct radeon_device *rdev)
 		switch (RBIOS8(tv_info + 7) & 0xf) {
 		case 1:
 			tv_std = TV_STD_NTSC;
-			DRM_INFO("Default TV standard: NTSC\n");
+			DRM_DEBUG_KMS("Default TV standard: NTSC\n");
 			break;
 		case 2:
 			tv_std = TV_STD_PAL;
-			DRM_INFO("Default TV standard: PAL\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL\n");
 			break;
 		case 3:
 			tv_std = TV_STD_PAL_M;
-			DRM_INFO("Default TV standard: PAL-M\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
 			break;
 		case 4:
 			tv_std = TV_STD_PAL_60;
-			DRM_INFO("Default TV standard: PAL-60\n");
+			DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
 			break;
 		case 5:
 			tv_std = TV_STD_NTSC_J;
-			DRM_INFO("Default TV standard: NTSC-J\n");
+			DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
 			break;
 		case 6:
 			tv_std = TV_STD_SCART_PAL;
-			DRM_INFO("Default TV standard: SCART-PAL\n");
+			DRM_DEBUG_KMS("Default TV standard: SCART-PAL\n");
 			break;
 		default:
 			tv_std = TV_STD_NTSC;
-			DRM_INFO
+			DRM_DEBUG_KMS
 			    ("Unknown TV standard; defaulting to NTSC\n");
 			break;
 		}
 
 		switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
 		case 0:
-			DRM_INFO("29.498928713 MHz TV ref clk\n");
+			DRM_DEBUG_KMS("29.498928713 MHz TV ref clk\n");
 			break;
 		case 1:
-			DRM_INFO("28.636360000 MHz TV ref clk\n");
+			DRM_DEBUG_KMS("28.636360000 MHz TV ref clk\n");
 			break;
 		case 2:
-			DRM_INFO("14.318180000 MHz TV ref clk\n");
+			DRM_DEBUG_KMS("14.318180000 MHz TV ref clk\n");
 			break;
 		case 3:
-			DRM_INFO("27.000000000 MHz TV ref clk\n");
+			DRM_DEBUG_KMS("27.000000000 MHz TV ref clk\n");
 			break;
 		default:
 			break;
@@ -1324,7 +1324,7 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
 
 	if (tmds_info) {
 		ver = RBIOS8(tmds_info);
-		DRM_INFO("DFP table revision: %d\n", ver);
+		DRM_DEBUG_KMS("DFP table revision: %d\n", ver);
 		if (ver == 3) {
 			n = RBIOS8(tmds_info + 5) + 1;
 			if (n > 4)
@@ -1408,7 +1408,7 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
 	offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
 	if (offset) {
 		ver = RBIOS8(offset);
-		DRM_INFO("External TMDS Table revision: %d\n", ver);
+		DRM_DEBUG_KMS("External TMDS Table revision: %d\n", ver);
 		tmds->slave_addr = RBIOS8(offset + 4 + 2);
 		tmds->slave_addr >>= 1; /* 7 bit addressing */
 		gpio = RBIOS8(offset + 4 + 3);
@@ -1485,6 +1485,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 			/* PowerMac8,1 ? */
 			/* imac g5 isight */
 			rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+		} else if ((rdev->pdev->device == 0x4a48) &&
+			   (rdev->pdev->subsystem_vendor == 0x1002) &&
+			   (rdev->pdev->subsystem_device == 0x4a48)) {
+			/* Mac X800 */
+			rdev->mode_info.connector_table = CT_MAC_X800;
 		} else
 #endif /* CONFIG_PPC_PMAC */
 #ifdef CONFIG_PPC64
@@ -1505,7 +1510,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1520,7 +1525,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1535,7 +1540,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1550,12 +1555,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_1;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP1_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1571,7 +1576,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1588,7 +1593,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1607,7 +1612,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1619,7 +1624,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1631,7 +1636,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1648,7 +1653,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1660,12 +1665,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_2; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP2_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP2_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1680,7 +1685,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1697,7 +1702,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1709,12 +1714,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_1; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP1_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1728,7 +1733,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1745,7 +1750,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1757,7 +1762,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1769,7 +1774,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1786,12 +1791,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		hpd.hpd = RADEON_HPD_2; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP2_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP2_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1806,7 +1811,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1823,12 +1828,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		hpd.hpd = RADEON_HPD_1; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP1_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1842,7 +1847,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1859,7 +1864,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
 		hpd.hpd = RADEON_HPD_1; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP1_SUPPORT);
@@ -1871,7 +1876,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1883,7 +1888,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1900,7 +1905,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1912,7 +1917,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1924,7 +1929,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1941,7 +1946,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1952,7 +1957,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1961,6 +1966,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 					  CONNECTOR_OBJECT_ID_VGA,
 					  &hpd);
 		break;
+	case CT_MAC_X800:
+		DRM_INFO("Connector Table: %d (mac x800)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI - primary dac, internal tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT1_SUPPORT,
+								  1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* DVI - tv dac, dvo */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP2_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT2_SUPPORT,
+								  2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+					    &hpd);
+		break;
 	default:
 		DRM_INFO("Connector table: %d (invalid)\n",
 			 rdev->mode_info.connector_table);
@@ -2109,7 +2156,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 			else
 				devices = ATOM_DEVICE_DFP1_SUPPORT;
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id
+						  radeon_get_encoder_enum
 						  (dev, devices, 0),
 						  devices);
 			radeon_add_legacy_connector(dev, i, devices,
@@ -2123,7 +2170,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 			if (tmp & 0x1) {
 				devices = ATOM_DEVICE_CRT2_SUPPORT;
 				radeon_add_legacy_encoder(dev,
-							  radeon_get_encoder_id
+							  radeon_get_encoder_enum
 							  (dev,
 							  ATOM_DEVICE_CRT2_SUPPORT,
 							  2),
@@ -2131,7 +2178,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 			} else {
 				devices = ATOM_DEVICE_CRT1_SUPPORT;
 				radeon_add_legacy_encoder(dev,
-							  radeon_get_encoder_id
+							  radeon_get_encoder_enum
 							  (dev,
 							  ATOM_DEVICE_CRT1_SUPPORT,
 							  1),
@@ -2151,7 +2198,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 			if (tmp & 0x1) {
 				devices |= ATOM_DEVICE_CRT2_SUPPORT;
 				radeon_add_legacy_encoder(dev,
-							  radeon_get_encoder_id
+							  radeon_get_encoder_enum
 							  (dev,
 							  ATOM_DEVICE_CRT2_SUPPORT,
 							  2),
@@ -2159,7 +2206,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 			} else {
 				devices |= ATOM_DEVICE_CRT1_SUPPORT;
 				radeon_add_legacy_encoder(dev,
-							  radeon_get_encoder_id
+							  radeon_get_encoder_enum
 							  (dev,
 							  ATOM_DEVICE_CRT1_SUPPORT,
 							  1),
@@ -2168,7 +2215,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 			if ((tmp >> 4) & 0x1) {
 				devices |= ATOM_DEVICE_DFP2_SUPPORT;
 				radeon_add_legacy_encoder(dev,
-							  radeon_get_encoder_id
+							  radeon_get_encoder_enum
 							  (dev,
 							  ATOM_DEVICE_DFP2_SUPPORT,
 							  0),
@@ -2177,7 +2224,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 			} else {
 				devices |= ATOM_DEVICE_DFP1_SUPPORT;
 				radeon_add_legacy_encoder(dev,
-							  radeon_get_encoder_id
+							  radeon_get_encoder_enum
 							  (dev,
 							  ATOM_DEVICE_DFP1_SUPPORT,
 							  0),
@@ -2202,7 +2249,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 				connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
 			}
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id
+						  radeon_get_encoder_enum
 						  (dev, devices, 0),
 						  devices);
 			radeon_add_legacy_connector(dev, i, devices,
@@ -2215,7 +2262,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 		case CONNECTOR_CTV_LEGACY:
 		case CONNECTOR_STV_LEGACY:
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id
+						  radeon_get_encoder_enum
 						  (dev,
 						  ATOM_DEVICE_TV1_SUPPORT,
 						  2),
@@ -2242,12 +2289,12 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 		DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
 
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP1_SUPPORT);
@@ -2268,7 +2315,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 		DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
 		if (crt_info) {
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									ATOM_DEVICE_CRT1_SUPPORT,
 									1),
 						  ATOM_DEVICE_CRT1_SUPPORT);
@@ -2297,7 +2344,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 						  COMBIOS_LCD_DDC_INFO_TABLE);
 
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -2351,7 +2398,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 		hpd.hpd = RADEON_HPD_NONE;
 		ddc_i2c.valid = false;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id
+					  radeon_get_encoder_enum
 					  (dev,
 					  ATOM_DEVICE_TV1_SUPPORT,
 					  2),
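
Besides renaming radeon_get_encoder_id() to radeon_get_encoder_enum() throughout, the combios changes add a hand-built CT_MAC_X800 connector table that is selected purely by PCI identity (device 0x4a48 with Apple's 0x1002/0x4a48 subsystem IDs). A minimal stand-alone sketch of that selection test; the struct is an invented stand-in for the fields the driver reads from struct pci_dev.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Stand-in for the PCI identity fields the driver reads from struct pci_dev. */
struct pci_identity {
	uint16_t device, subsystem_vendor, subsystem_device;
};

/* Same test the hunk adds: device 0x4a48 with subsystem 0x1002/0x4a48
 * selects the hand-built Mac X800 connector table. */
static bool wants_mac_x800_table(const struct pci_identity *id)
{
	return id->device == 0x4a48 &&
	       id->subsystem_vendor == 0x1002 &&
	       id->subsystem_device == 0x4a48;
}

int main(void)
{
	struct pci_identity id = { 0x4a48, 0x1002, 0x4a48 };

	printf("use CT_MAC_X800: %s\n", wants_mac_x800_table(&id) ? "yes" : "no");
	return 0;
}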
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 47c4b276d30c..ecc1a8fafbfd 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -481,7 +481,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
481 return MODE_OK; 481 return MODE_OK;
482} 482}
483 483
484static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector) 484static enum drm_connector_status
485radeon_lvds_detect(struct drm_connector *connector, bool force)
485{ 486{
486 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 487 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
487 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 488 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -594,7 +595,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector,
594 return MODE_OK; 595 return MODE_OK;
595} 596}
596 597
597static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector) 598static enum drm_connector_status
599radeon_vga_detect(struct drm_connector *connector, bool force)
598{ 600{
599 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 601 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
600 struct drm_encoder *encoder; 602 struct drm_encoder *encoder;
@@ -691,7 +693,8 @@ static int radeon_tv_mode_valid(struct drm_connector *connector,
691 return MODE_OK; 693 return MODE_OK;
692} 694}
693 695
694static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector) 696static enum drm_connector_status
697radeon_tv_detect(struct drm_connector *connector, bool force)
695{ 698{
696 struct drm_encoder *encoder; 699 struct drm_encoder *encoder;
697 struct drm_encoder_helper_funcs *encoder_funcs; 700 struct drm_encoder_helper_funcs *encoder_funcs;
@@ -748,7 +751,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
748 * we have to check if this analog encoder is shared with anyone else (TV) 751 * we have to check if this analog encoder is shared with anyone else (TV)
749 * if its shared we have to set the other connector to disconnected. 752 * if its shared we have to set the other connector to disconnected.
750 */ 753 */
751static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector) 754static enum drm_connector_status
755radeon_dvi_detect(struct drm_connector *connector, bool force)
752{ 756{
753 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 757 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
754 struct drm_encoder *encoder = NULL; 758 struct drm_encoder *encoder = NULL;
@@ -972,32 +976,35 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
972 return ret; 976 return ret;
973} 977}
974 978
975static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector) 979static enum drm_connector_status
980radeon_dp_detect(struct drm_connector *connector, bool force)
976{ 981{
977 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 982 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
978 enum drm_connector_status ret = connector_status_disconnected; 983 enum drm_connector_status ret = connector_status_disconnected;
979 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; 984 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
980 u8 sink_type;
981 985
982 if (radeon_connector->edid) { 986 if (radeon_connector->edid) {
983 kfree(radeon_connector->edid); 987 kfree(radeon_connector->edid);
984 radeon_connector->edid = NULL; 988 radeon_connector->edid = NULL;
985 } 989 }
986 990
987 sink_type = radeon_dp_getsinktype(radeon_connector); 991 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
988 if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 992 /* eDP is always DP */
989 (sink_type == CONNECTOR_OBJECT_ID_eDP)) { 993 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
990 if (radeon_dp_getdpcd(radeon_connector)) { 994 if (radeon_dp_getdpcd(radeon_connector))
991 radeon_dig_connector->dp_sink_type = sink_type;
992 ret = connector_status_connected; 995 ret = connector_status_connected;
993 }
994 } else { 996 } else {
995 if (radeon_ddc_probe(radeon_connector)) { 997 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
996 radeon_dig_connector->dp_sink_type = sink_type; 998 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
997 ret = connector_status_connected; 999 if (radeon_dp_getdpcd(radeon_connector))
1000 ret = connector_status_connected;
1001 } else {
1002 if (radeon_ddc_probe(radeon_connector))
1003 ret = connector_status_connected;
998 } 1004 }
999 } 1005 }
1000 1006
1007 radeon_connector_update_scratch_regs(connector, ret);
1001 return ret; 1008 return ret;
1002} 1009}
1003 1010
@@ -1037,7 +1044,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1037 uint32_t supported_device, 1044 uint32_t supported_device,
1038 int connector_type, 1045 int connector_type,
1039 struct radeon_i2c_bus_rec *i2c_bus, 1046 struct radeon_i2c_bus_rec *i2c_bus,
1040 bool linkb,
1041 uint32_t igp_lane_info, 1047 uint32_t igp_lane_info,
1042 uint16_t connector_object_id, 1048 uint16_t connector_object_id,
1043 struct radeon_hpd *hpd, 1049 struct radeon_hpd *hpd,
@@ -1050,10 +1056,16 @@ radeon_add_atom_connector(struct drm_device *dev,
1050 uint32_t subpixel_order = SubPixelNone; 1056 uint32_t subpixel_order = SubPixelNone;
1051 bool shared_ddc = false; 1057 bool shared_ddc = false;
1052 1058
1053 /* fixme - tv/cv/din */
1054 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1059 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
1055 return; 1060 return;
1056 1061
1062 /* if the user selected tv=0 don't try and add the connector */
1063 if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
1064 (connector_type == DRM_MODE_CONNECTOR_Composite) ||
1065 (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
1066 (radeon_tv == 0))
1067 return;
1068
1057 /* see if we already added it */ 1069 /* see if we already added it */
1058 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1070 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1059 radeon_connector = to_radeon_connector(connector); 1071 radeon_connector = to_radeon_connector(connector);
@@ -1128,7 +1140,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1128 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1140 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1129 if (!radeon_dig_connector) 1141 if (!radeon_dig_connector)
1130 goto failed; 1142 goto failed;
1131 radeon_dig_connector->linkb = linkb;
1132 radeon_dig_connector->igp_lane_info = igp_lane_info; 1143 radeon_dig_connector->igp_lane_info = igp_lane_info;
1133 radeon_connector->con_priv = radeon_dig_connector; 1144 radeon_connector->con_priv = radeon_dig_connector;
1134 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1145 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1158,7 +1169,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1158 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1169 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1159 if (!radeon_dig_connector) 1170 if (!radeon_dig_connector)
1160 goto failed; 1171 goto failed;
1161 radeon_dig_connector->linkb = linkb;
1162 radeon_dig_connector->igp_lane_info = igp_lane_info; 1172 radeon_dig_connector->igp_lane_info = igp_lane_info;
1163 radeon_connector->con_priv = radeon_dig_connector; 1173 radeon_connector->con_priv = radeon_dig_connector;
1164 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1174 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1182,7 +1192,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1182 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1192 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1183 if (!radeon_dig_connector) 1193 if (!radeon_dig_connector)
1184 goto failed; 1194 goto failed;
1185 radeon_dig_connector->linkb = linkb;
1186 radeon_dig_connector->igp_lane_info = igp_lane_info; 1195 radeon_dig_connector->igp_lane_info = igp_lane_info;
1187 radeon_connector->con_priv = radeon_dig_connector; 1196 radeon_connector->con_priv = radeon_dig_connector;
1188 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); 1197 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
@@ -1211,25 +1220,22 @@ radeon_add_atom_connector(struct drm_device *dev,
1211 case DRM_MODE_CONNECTOR_SVIDEO: 1220 case DRM_MODE_CONNECTOR_SVIDEO:
1212 case DRM_MODE_CONNECTOR_Composite: 1221 case DRM_MODE_CONNECTOR_Composite:
1213 case DRM_MODE_CONNECTOR_9PinDIN: 1222 case DRM_MODE_CONNECTOR_9PinDIN:
1214 if (radeon_tv == 1) { 1223 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1215 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1224 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1216 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1225 radeon_connector->dac_load_detect = true;
1217 radeon_connector->dac_load_detect = true; 1226 drm_connector_attach_property(&radeon_connector->base,
1218 drm_connector_attach_property(&radeon_connector->base, 1227 rdev->mode_info.load_detect_property,
1219 rdev->mode_info.load_detect_property, 1228 1);
1220 1); 1229 drm_connector_attach_property(&radeon_connector->base,
1221 drm_connector_attach_property(&radeon_connector->base, 1230 rdev->mode_info.tv_std_property,
1222 rdev->mode_info.tv_std_property, 1231 radeon_atombios_get_tv_info(rdev));
1223 radeon_atombios_get_tv_info(rdev)); 1232 /* no HPD on analog connectors */
1224 /* no HPD on analog connectors */ 1233 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1225 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1226 }
1227 break; 1234 break;
1228 case DRM_MODE_CONNECTOR_LVDS: 1235 case DRM_MODE_CONNECTOR_LVDS:
1229 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1236 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1230 if (!radeon_dig_connector) 1237 if (!radeon_dig_connector)
1231 goto failed; 1238 goto failed;
1232 radeon_dig_connector->linkb = linkb;
1233 radeon_dig_connector->igp_lane_info = igp_lane_info; 1239 radeon_dig_connector->igp_lane_info = igp_lane_info;
1234 radeon_connector->con_priv = radeon_dig_connector; 1240 radeon_connector->con_priv = radeon_dig_connector;
1235 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1241 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
@@ -1275,10 +1281,16 @@ radeon_add_legacy_connector(struct drm_device *dev,
1275 struct radeon_connector *radeon_connector; 1281 struct radeon_connector *radeon_connector;
1276 uint32_t subpixel_order = SubPixelNone; 1282 uint32_t subpixel_order = SubPixelNone;
1277 1283
1278 /* fixme - tv/cv/din */
1279 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1284 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
1280 return; 1285 return;
1281 1286
1287 /* if the user selected tv=0 don't try and add the connector */
1288 if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
1289 (connector_type == DRM_MODE_CONNECTOR_Composite) ||
1290 (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
1291 (radeon_tv == 0))
1292 return;
1293
1282 /* see if we already added it */ 1294 /* see if we already added it */
1283 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1295 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1284 radeon_connector = to_radeon_connector(connector); 1296 radeon_connector = to_radeon_connector(connector);
@@ -1350,26 +1362,24 @@ radeon_add_legacy_connector(struct drm_device *dev,
1350 case DRM_MODE_CONNECTOR_SVIDEO: 1362 case DRM_MODE_CONNECTOR_SVIDEO:
1351 case DRM_MODE_CONNECTOR_Composite: 1363 case DRM_MODE_CONNECTOR_Composite:
1352 case DRM_MODE_CONNECTOR_9PinDIN: 1364 case DRM_MODE_CONNECTOR_9PinDIN:
1353 if (radeon_tv == 1) { 1365 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1354 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1366 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1355 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1367 radeon_connector->dac_load_detect = true;
1356 radeon_connector->dac_load_detect = true; 1368 /* RS400,RC410,RS480 chipset seems to report a lot
1357 /* RS400,RC410,RS480 chipset seems to report a lot 1369 * of false positive on load detect, we haven't yet
1358 * of false positive on load detect, we haven't yet 1370 * found a way to make load detect reliable on those
1359 * found a way to make load detect reliable on those 1371 * chipset, thus just disable it for TV.
1360 * chipset, thus just disable it for TV. 1372 */
1361 */ 1373 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
1362 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) 1374 radeon_connector->dac_load_detect = false;
1363 radeon_connector->dac_load_detect = false; 1375 drm_connector_attach_property(&radeon_connector->base,
1364 drm_connector_attach_property(&radeon_connector->base, 1376 rdev->mode_info.load_detect_property,
1365 rdev->mode_info.load_detect_property, 1377 radeon_connector->dac_load_detect);
1366 radeon_connector->dac_load_detect); 1378 drm_connector_attach_property(&radeon_connector->base,
1367 drm_connector_attach_property(&radeon_connector->base, 1379 rdev->mode_info.tv_std_property,
1368 rdev->mode_info.tv_std_property, 1380 radeon_combios_get_tv_info(rdev));
1369 radeon_combios_get_tv_info(rdev)); 1381 /* no HPD on analog connectors */
1370 /* no HPD on analog connectors */ 1382 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1371 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1372 }
1373 break; 1383 break;
1374 case DRM_MODE_CONNECTOR_LVDS: 1384 case DRM_MODE_CONNECTOR_LVDS:
1375 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1385 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
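
Both connector-table paths above end up with the same shape: instead of wrapping every TV case body in "if (radeon_tv == 1) { ... }", the TV connector types are rejected once at the top of the function, and the case bodies then run unconditionally. A minimal sketch of that guard-hoisting pattern; the names (tv_enabled, add_connector, CONN_*) are illustrative stand-ins, not the driver's identifiers:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; the driver uses DRM_MODE_CONNECTOR_* codes
 * and the radeon_tv module parameter. */
enum conn_type { CONN_VGA, CONN_SVIDEO, CONN_COMPOSITE, CONN_9PIN_DIN };

static bool tv_enabled = false;     /* think "radeon_tv=0" on the kernel command line */

static void add_connector(enum conn_type type)
{
    /* hoisted guard: skip TV connectors entirely when TV is disabled,
     * rather than testing the flag inside every TV case below */
    if ((type == CONN_SVIDEO || type == CONN_COMPOSITE ||
         type == CONN_9PIN_DIN) && !tv_enabled)
        return;

    switch (type) {
    case CONN_SVIDEO:
    case CONN_COMPOSITE:
    case CONN_9PIN_DIN:
        printf("registering TV connector %d\n", (int)type);
        break;
    default:
        printf("registering connector %d\n", (int)type);
        break;
    }
}

int main(void)
{
    add_connector(CONN_SVIDEO);     /* silently skipped */
    add_connector(CONN_VGA);        /* registered */
    return 0;
}
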
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4f7a170d1566..256d204a6d24 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
199 mc->mc_vram_size = mc->aper_size; 199 mc->mc_vram_size = mc->aper_size;
200 } 200 }
201 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 201 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
202 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) { 202 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
203 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); 203 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
204 mc->real_vram_size = mc->aper_size; 204 mc->real_vram_size = mc->aper_size;
205 mc->mc_vram_size = mc->aper_size; 205 mc->mc_vram_size = mc->aper_size;
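
The corrected condition above is the standard intersection test for two inclusive ranges: [vram_start, vram_end] overlaps [gtt_start, gtt_end] when each range starts before the other ends. The old form compared vram_end against both bounds, so a VRAM range that completely enclosed the GTT range slipped through. A self-contained sketch of the predicate, using plain integers rather than the driver's struct radeon_mc:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Two inclusive ranges [a0, a1] and [b0, b1] overlap iff each one
 * starts before the other ends. Checking only one endpoint, as the
 * old condition did, misses the case where [a0, a1] fully encloses
 * [b0, b1]. */
static bool ranges_overlap(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1)
{
    return a1 >= b0 && a0 <= b1;
}

int main(void)
{
    /* illustrative layout: VRAM covers 0 .. 1 GiB - 1, GTT 256 MiB .. 512 MiB - 1 */
    uint64_t vram_start = 0, vram_end = (1ULL << 30) - 1;
    uint64_t gtt_start = 256ULL << 20, gtt_end = (512ULL << 20) - 1;

    /* old-style check: only asks whether vram_end lies inside the GTT range */
    bool old_check = vram_end > gtt_start && vram_end <= gtt_end;
    bool new_check = ranges_overlap(vram_start, vram_end, gtt_start, gtt_end);

    printf("old check: %d, correct overlap test: %d\n", old_check, new_check);
    return 0;
}

Here the old check reports no conflict even though VRAM covers the whole GTT window, while the two-endpoint test catches it.
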
@@ -293,30 +293,20 @@ bool radeon_card_posted(struct radeon_device *rdev)
293void radeon_update_bandwidth_info(struct radeon_device *rdev) 293void radeon_update_bandwidth_info(struct radeon_device *rdev)
294{ 294{
295 fixed20_12 a; 295 fixed20_12 a;
296 u32 sclk, mclk; 296 u32 sclk = rdev->pm.current_sclk;
297 u32 mclk = rdev->pm.current_mclk;
297 298
298 if (rdev->flags & RADEON_IS_IGP) { 299 /* sclk/mclk in Mhz */
299 sclk = radeon_get_engine_clock(rdev); 300 a.full = dfixed_const(100);
300 mclk = rdev->clock.default_mclk; 301 rdev->pm.sclk.full = dfixed_const(sclk);
301 302 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
302 a.full = dfixed_const(100); 303 rdev->pm.mclk.full = dfixed_const(mclk);
303 rdev->pm.sclk.full = dfixed_const(sclk); 304 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
304 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
305 rdev->pm.mclk.full = dfixed_const(mclk);
306 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
307 305
306 if (rdev->flags & RADEON_IS_IGP) {
308 a.full = dfixed_const(16); 307 a.full = dfixed_const(16);
309 /* core_bandwidth = sclk(Mhz) * 16 */ 308 /* core_bandwidth = sclk(Mhz) * 16 */
310 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); 309 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
311 } else {
312 sclk = radeon_get_engine_clock(rdev);
313 mclk = radeon_get_memory_clock(rdev);
314
315 a.full = dfixed_const(100);
316 rdev->pm.sclk.full = dfixed_const(sclk);
317 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
318 rdev->pm.mclk.full = dfixed_const(mclk);
319 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
320 } 310 }
321} 311}
322 312
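
Both branches previously performed the same sclk/mclk conversion, so it is now done once before the IGP test. fixed20_12 is a 20.12 fixed-point type (dfixed_const(x) is essentially x << 12 and dfixed_div a fixed-point divide), and the divide by 100 turns the stored clock, which the power-management code appears to keep in 10 kHz units, into MHz as the comment says. A minimal 20.12 sketch of that conversion with local helpers standing in for drm_fixed.h:

#include <stdint.h>
#include <stdio.h>

/* Local 20.12 fixed-point helpers, rough stand-ins for dfixed_const()
 * and dfixed_div() from drm_fixed.h. */
typedef uint32_t fixed20_12;

static fixed20_12 fx_const(uint32_t v) { return v << 12; }

static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
{
    return (fixed20_12)(((uint64_t)a << 12) / b);
}

int main(void)
{
    /* illustrative value: a 680 MHz engine clock stored as 68000 (10 kHz units) */
    uint32_t current_sclk = 68000;
    fixed20_12 sclk_mhz = fx_div(fx_const(current_sclk), fx_const(100));

    printf("sclk = %.2f MHz (raw 20.12 value %u)\n",
           sclk_mhz / 4096.0, (unsigned)sclk_mhz);
    return 0;
}
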
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 5764f4d3b4f1..b92d2f2fcbed 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev)
349 DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); 349 DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
350 if (devices & ATOM_DEVICE_DFP5_SUPPORT) 350 if (devices & ATOM_DEVICE_DFP5_SUPPORT)
351 DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); 351 DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
352 if (devices & ATOM_DEVICE_DFP6_SUPPORT)
353 DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
352 if (devices & ATOM_DEVICE_TV1_SUPPORT) 354 if (devices & ATOM_DEVICE_TV1_SUPPORT)
353 DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); 355 DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
354 if (devices & ATOM_DEVICE_CV_SUPPORT) 356 if (devices & ATOM_DEVICE_CV_SUPPORT)
@@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
841{ 843{
842 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); 844 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
843 845
844 if (radeon_fb->obj) 846 if (radeon_fb->obj) {
845 drm_gem_object_unreference_unlocked(radeon_fb->obj); 847 drm_gem_object_unreference_unlocked(radeon_fb->obj);
848 }
846 drm_framebuffer_cleanup(fb); 849 drm_framebuffer_cleanup(fb);
847 kfree(radeon_fb); 850 kfree(radeon_fb);
848} 851}
@@ -1094,6 +1097,18 @@ void radeon_modeset_fini(struct radeon_device *rdev)
1094 radeon_i2c_fini(rdev); 1097 radeon_i2c_fini(rdev);
1095} 1098}
1096 1099
1100static bool is_hdtv_mode(struct drm_display_mode *mode)
1101{
1102 /* try and guess if this is a tv or a monitor */
1103 if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
1104 (mode->vdisplay == 576) || /* 576p */
1105 (mode->vdisplay == 720) || /* 720p */
1106 (mode->vdisplay == 1080)) /* 1080p */
1107 return true;
1108 else
1109 return false;
1110}
1111
1097bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, 1112bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1098 struct drm_display_mode *mode, 1113 struct drm_display_mode *mode,
1099 struct drm_display_mode *adjusted_mode) 1114 struct drm_display_mode *adjusted_mode)
@@ -1128,20 +1143,22 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1128 radeon_crtc->rmx_type = radeon_encoder->rmx_type; 1143 radeon_crtc->rmx_type = radeon_encoder->rmx_type;
1129 else 1144 else
1130 radeon_crtc->rmx_type = RMX_OFF; 1145 radeon_crtc->rmx_type = RMX_OFF;
1131 src_v = crtc->mode.vdisplay;
1132 dst_v = radeon_crtc->native_mode.vdisplay;
1133 src_h = crtc->mode.hdisplay;
1134 dst_h = radeon_crtc->native_mode.vdisplay;
1135 /* copy native mode */ 1146 /* copy native mode */
1136 memcpy(&radeon_crtc->native_mode, 1147 memcpy(&radeon_crtc->native_mode,
1137 &radeon_encoder->native_mode, 1148 &radeon_encoder->native_mode,
1138 sizeof(struct drm_display_mode)); 1149 sizeof(struct drm_display_mode));
1150 src_v = crtc->mode.vdisplay;
1151 dst_v = radeon_crtc->native_mode.vdisplay;
1152 src_h = crtc->mode.hdisplay;
1153 dst_h = radeon_crtc->native_mode.hdisplay;
1139 1154
1140 /* fix up for overscan on hdmi */ 1155 /* fix up for overscan on hdmi */
1141 if (ASIC_IS_AVIVO(rdev) && 1156 if (ASIC_IS_AVIVO(rdev) &&
1157 (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
1142 ((radeon_encoder->underscan_type == UNDERSCAN_ON) || 1158 ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
1143 ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && 1159 ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
1144 drm_detect_hdmi_monitor(radeon_connector->edid)))) { 1160 drm_detect_hdmi_monitor(radeon_connector->edid) &&
1161 is_hdtv_mode(mode)))) {
1145 radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; 1162 radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
1146 radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; 1163 radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
1147 radeon_crtc->rmx_type = RMX_FULL; 1164 radeon_crtc->rmx_type = RMX_FULL;
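
The automatic-underscan path above now also requires a non-interlaced mode whose resolution looks like a TV mode (is_hdtv_mode()), since HDMI televisions are the displays that typically overscan. The border formula is hdisplay/32 + 16 and vdisplay/32 + 16, so for an illustrative 1920x1080 mode: h_border = (1920 >> 5) + 16 = 60 + 16 = 76 and v_border = (1080 >> 5) + 16 = 33 + 16 = 49, leaving roughly 1920 - 2*76 = 1768 by 1080 - 2*49 = 982 pixels for the scaled image inside the full-screen (RMX_FULL) frame.
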
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 263c8098d7dd..2c293e8304d6 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -81,7 +81,7 @@ void radeon_setup_encoder_clones(struct drm_device *dev)
81} 81}
82 82
83uint32_t 83uint32_t
84radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) 84radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
85{ 85{
86 struct radeon_device *rdev = dev->dev_private; 86 struct radeon_device *rdev = dev->dev_private;
87 uint32_t ret = 0; 87 uint32_t ret = 0;
@@ -97,59 +97,59 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
97 if ((rdev->family == CHIP_RS300) || 97 if ((rdev->family == CHIP_RS300) ||
98 (rdev->family == CHIP_RS400) || 98 (rdev->family == CHIP_RS400) ||
99 (rdev->family == CHIP_RS480)) 99 (rdev->family == CHIP_RS480))
100 ret = ENCODER_OBJECT_ID_INTERNAL_DAC2; 100 ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
101 else if (ASIC_IS_AVIVO(rdev)) 101 else if (ASIC_IS_AVIVO(rdev))
102 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1; 102 ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
103 else 103 else
104 ret = ENCODER_OBJECT_ID_INTERNAL_DAC1; 104 ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
105 break; 105 break;
106 case 2: /* dac b */ 106 case 2: /* dac b */
107 if (ASIC_IS_AVIVO(rdev)) 107 if (ASIC_IS_AVIVO(rdev))
108 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2; 108 ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
109 else { 109 else {
110 /*if (rdev->family == CHIP_R200) 110 /*if (rdev->family == CHIP_R200)
111 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; 111 ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
112 else*/ 112 else*/
113 ret = ENCODER_OBJECT_ID_INTERNAL_DAC2; 113 ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
114 } 114 }
115 break; 115 break;
116 case 3: /* external dac */ 116 case 3: /* external dac */
117 if (ASIC_IS_AVIVO(rdev)) 117 if (ASIC_IS_AVIVO(rdev))
118 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1; 118 ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
119 else 119 else
120 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; 120 ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
121 break; 121 break;
122 } 122 }
123 break; 123 break;
124 case ATOM_DEVICE_LCD1_SUPPORT: 124 case ATOM_DEVICE_LCD1_SUPPORT:
125 if (ASIC_IS_AVIVO(rdev)) 125 if (ASIC_IS_AVIVO(rdev))
126 ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1; 126 ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
127 else 127 else
128 ret = ENCODER_OBJECT_ID_INTERNAL_LVDS; 128 ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
129 break; 129 break;
130 case ATOM_DEVICE_DFP1_SUPPORT: 130 case ATOM_DEVICE_DFP1_SUPPORT:
131 if ((rdev->family == CHIP_RS300) || 131 if ((rdev->family == CHIP_RS300) ||
132 (rdev->family == CHIP_RS400) || 132 (rdev->family == CHIP_RS400) ||
133 (rdev->family == CHIP_RS480)) 133 (rdev->family == CHIP_RS480))
134 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; 134 ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
135 else if (ASIC_IS_AVIVO(rdev)) 135 else if (ASIC_IS_AVIVO(rdev))
136 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1; 136 ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
137 else 137 else
138 ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1; 138 ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
139 break; 139 break;
140 case ATOM_DEVICE_LCD2_SUPPORT: 140 case ATOM_DEVICE_LCD2_SUPPORT:
141 case ATOM_DEVICE_DFP2_SUPPORT: 141 case ATOM_DEVICE_DFP2_SUPPORT:
142 if ((rdev->family == CHIP_RS600) || 142 if ((rdev->family == CHIP_RS600) ||
143 (rdev->family == CHIP_RS690) || 143 (rdev->family == CHIP_RS690) ||
144 (rdev->family == CHIP_RS740)) 144 (rdev->family == CHIP_RS740))
145 ret = ENCODER_OBJECT_ID_INTERNAL_DDI; 145 ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
146 else if (ASIC_IS_AVIVO(rdev)) 146 else if (ASIC_IS_AVIVO(rdev))
147 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1; 147 ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
148 else 148 else
149 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; 149 ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
150 break; 150 break;
151 case ATOM_DEVICE_DFP3_SUPPORT: 151 case ATOM_DEVICE_DFP3_SUPPORT:
152 ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1; 152 ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
153 break; 153 break;
154 } 154 }
155 155
@@ -228,32 +228,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
228 return NULL; 228 return NULL;
229} 229}
230 230
231static struct radeon_connector_atom_dig *
232radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
233{
234 struct drm_device *dev = encoder->dev;
235 struct radeon_device *rdev = dev->dev_private;
236 struct drm_connector *connector;
237 struct radeon_connector *radeon_connector;
238 struct radeon_connector_atom_dig *dig_connector;
239
240 if (!rdev->is_atom_bios)
241 return NULL;
242
243 connector = radeon_get_connector_for_encoder(encoder);
244 if (!connector)
245 return NULL;
246
247 radeon_connector = to_radeon_connector(connector);
248
249 if (!radeon_connector->con_priv)
250 return NULL;
251
252 dig_connector = radeon_connector->con_priv;
253
254 return dig_connector;
255}
256
257void radeon_panel_mode_fixup(struct drm_encoder *encoder, 231void radeon_panel_mode_fixup(struct drm_encoder *encoder,
258 struct drm_display_mode *adjusted_mode) 232 struct drm_display_mode *adjusted_mode)
259{ 233{
@@ -512,14 +486,12 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
512 struct radeon_device *rdev = dev->dev_private; 486 struct radeon_device *rdev = dev->dev_private;
513 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 487 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
514 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 488 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
515 struct radeon_connector_atom_dig *dig_connector =
516 radeon_get_atom_connector_priv_from_encoder(encoder);
517 union lvds_encoder_control args; 489 union lvds_encoder_control args;
518 int index = 0; 490 int index = 0;
519 int hdmi_detected = 0; 491 int hdmi_detected = 0;
520 uint8_t frev, crev; 492 uint8_t frev, crev;
521 493
522 if (!dig || !dig_connector) 494 if (!dig)
523 return; 495 return;
524 496
525 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) 497 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
@@ -562,7 +534,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
562 if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) 534 if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
563 args.v1.ucMisc |= (1 << 1); 535 args.v1.ucMisc |= (1 << 1);
564 } else { 536 } else {
565 if (dig_connector->linkb) 537 if (dig->linkb)
566 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; 538 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
567 if (radeon_encoder->pixel_clock > 165000) 539 if (radeon_encoder->pixel_clock > 165000)
568 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; 540 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -601,7 +573,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
601 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; 573 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
602 } 574 }
603 } else { 575 } else {
604 if (dig_connector->linkb) 576 if (dig->linkb)
605 args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; 577 args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
606 if (radeon_encoder->pixel_clock > 165000) 578 if (radeon_encoder->pixel_clock > 165000)
607 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; 579 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -623,6 +595,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
623int 595int
624atombios_get_encoder_mode(struct drm_encoder *encoder) 596atombios_get_encoder_mode(struct drm_encoder *encoder)
625{ 597{
598 struct drm_device *dev = encoder->dev;
599 struct radeon_device *rdev = dev->dev_private;
626 struct drm_connector *connector; 600 struct drm_connector *connector;
627 struct radeon_connector *radeon_connector; 601 struct radeon_connector *radeon_connector;
628 struct radeon_connector_atom_dig *dig_connector; 602 struct radeon_connector_atom_dig *dig_connector;
@@ -636,9 +610,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
636 switch (connector->connector_type) { 610 switch (connector->connector_type) {
637 case DRM_MODE_CONNECTOR_DVII: 611 case DRM_MODE_CONNECTOR_DVII:
638 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 612 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
639 if (drm_detect_hdmi_monitor(radeon_connector->edid)) 613 if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
640 return ATOM_ENCODER_MODE_HDMI; 614 /* fix me */
641 else if (radeon_connector->use_digital) 615 if (ASIC_IS_DCE4(rdev))
616 return ATOM_ENCODER_MODE_DVI;
617 else
618 return ATOM_ENCODER_MODE_HDMI;
619 } else if (radeon_connector->use_digital)
642 return ATOM_ENCODER_MODE_DVI; 620 return ATOM_ENCODER_MODE_DVI;
643 else 621 else
644 return ATOM_ENCODER_MODE_CRT; 622 return ATOM_ENCODER_MODE_CRT;
@@ -646,9 +624,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
646 case DRM_MODE_CONNECTOR_DVID: 624 case DRM_MODE_CONNECTOR_DVID:
647 case DRM_MODE_CONNECTOR_HDMIA: 625 case DRM_MODE_CONNECTOR_HDMIA:
648 default: 626 default:
649 if (drm_detect_hdmi_monitor(radeon_connector->edid)) 627 if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
650 return ATOM_ENCODER_MODE_HDMI; 628 /* fix me */
651 else 629 if (ASIC_IS_DCE4(rdev))
630 return ATOM_ENCODER_MODE_DVI;
631 else
632 return ATOM_ENCODER_MODE_HDMI;
633 } else
652 return ATOM_ENCODER_MODE_DVI; 634 return ATOM_ENCODER_MODE_DVI;
653 break; 635 break;
654 case DRM_MODE_CONNECTOR_LVDS: 636 case DRM_MODE_CONNECTOR_LVDS:
@@ -660,9 +642,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
660 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 642 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
661 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 643 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
662 return ATOM_ENCODER_MODE_DP; 644 return ATOM_ENCODER_MODE_DP;
663 else if (drm_detect_hdmi_monitor(radeon_connector->edid)) 645 else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
664 return ATOM_ENCODER_MODE_HDMI; 646 /* fix me */
665 else 647 if (ASIC_IS_DCE4(rdev))
648 return ATOM_ENCODER_MODE_DVI;
649 else
650 return ATOM_ENCODER_MODE_HDMI;
651 } else
666 return ATOM_ENCODER_MODE_DVI; 652 return ATOM_ENCODER_MODE_DVI;
667 break; 653 break;
668 case DRM_MODE_CONNECTOR_DVIA: 654 case DRM_MODE_CONNECTOR_DVIA:
@@ -729,13 +715,24 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
729 struct radeon_device *rdev = dev->dev_private; 715 struct radeon_device *rdev = dev->dev_private;
730 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 716 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
731 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 717 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
732 struct radeon_connector_atom_dig *dig_connector = 718 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
733 radeon_get_atom_connector_priv_from_encoder(encoder);
734 union dig_encoder_control args; 719 union dig_encoder_control args;
735 int index = 0; 720 int index = 0;
736 uint8_t frev, crev; 721 uint8_t frev, crev;
722 int dp_clock = 0;
723 int dp_lane_count = 0;
724
725 if (connector) {
726 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
727 struct radeon_connector_atom_dig *dig_connector =
728 radeon_connector->con_priv;
737 729
738 if (!dig || !dig_connector) 730 dp_clock = dig_connector->dp_clock;
731 dp_lane_count = dig_connector->dp_lane_count;
732 }
733
734 /* no dig encoder assigned */
735 if (dig->dig_encoder == -1)
739 return; 736 return;
740 737
741 memset(&args, 0, sizeof(args)); 738 memset(&args, 0, sizeof(args));
@@ -757,9 +754,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
757 args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); 754 args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
758 755
759 if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) { 756 if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
760 if (dig_connector->dp_clock == 270000) 757 if (dp_clock == 270000)
761 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; 758 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
762 args.v1.ucLaneNum = dig_connector->dp_lane_count; 759 args.v1.ucLaneNum = dp_lane_count;
763 } else if (radeon_encoder->pixel_clock > 165000) 760 } else if (radeon_encoder->pixel_clock > 165000)
764 args.v1.ucLaneNum = 8; 761 args.v1.ucLaneNum = 8;
765 else 762 else
@@ -781,7 +778,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
781 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; 778 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
782 break; 779 break;
783 } 780 }
784 if (dig_connector->linkb) 781 if (dig->linkb)
785 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; 782 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
786 else 783 else
787 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; 784 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
@@ -804,38 +801,47 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
804 struct radeon_device *rdev = dev->dev_private; 801 struct radeon_device *rdev = dev->dev_private;
805 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 802 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
806 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 803 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
807 struct radeon_connector_atom_dig *dig_connector = 804 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
808 radeon_get_atom_connector_priv_from_encoder(encoder);
809 struct drm_connector *connector;
810 struct radeon_connector *radeon_connector;
811 union dig_transmitter_control args; 805 union dig_transmitter_control args;
812 int index = 0; 806 int index = 0;
813 uint8_t frev, crev; 807 uint8_t frev, crev;
814 bool is_dp = false; 808 bool is_dp = false;
815 int pll_id = 0; 809 int pll_id = 0;
810 int dp_clock = 0;
811 int dp_lane_count = 0;
812 int connector_object_id = 0;
813 int igp_lane_info = 0;
816 814
817 if (!dig || !dig_connector) 815 if (connector) {
818 return; 816 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
817 struct radeon_connector_atom_dig *dig_connector =
818 radeon_connector->con_priv;
819 819
820 connector = radeon_get_connector_for_encoder(encoder); 820 dp_clock = dig_connector->dp_clock;
821 radeon_connector = to_radeon_connector(connector); 821 dp_lane_count = dig_connector->dp_lane_count;
822 connector_object_id =
823 (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
824 igp_lane_info = dig_connector->igp_lane_info;
825 }
826
827 /* no dig encoder assigned */
828 if (dig->dig_encoder == -1)
829 return;
822 830
823 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) 831 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
824 is_dp = true; 832 is_dp = true;
825 833
826 memset(&args, 0, sizeof(args)); 834 memset(&args, 0, sizeof(args));
827 835
828 if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev)) 836 switch (radeon_encoder->encoder_id) {
837 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
838 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
839 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
829 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); 840 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
830 else { 841 break;
831 switch (radeon_encoder->encoder_id) { 842 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
832 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 843 index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
833 index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl); 844 break;
834 break;
835 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
836 index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
837 break;
838 }
839 } 845 }
840 846
841 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) 847 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
@@ -843,14 +849,14 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
843 849
844 args.v1.ucAction = action; 850 args.v1.ucAction = action;
845 if (action == ATOM_TRANSMITTER_ACTION_INIT) { 851 if (action == ATOM_TRANSMITTER_ACTION_INIT) {
846 args.v1.usInitInfo = radeon_connector->connector_object_id; 852 args.v1.usInitInfo = connector_object_id;
847 } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { 853 } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
848 args.v1.asMode.ucLaneSel = lane_num; 854 args.v1.asMode.ucLaneSel = lane_num;
849 args.v1.asMode.ucLaneSet = lane_set; 855 args.v1.asMode.ucLaneSet = lane_set;
850 } else { 856 } else {
851 if (is_dp) 857 if (is_dp)
852 args.v1.usPixelClock = 858 args.v1.usPixelClock =
853 cpu_to_le16(dig_connector->dp_clock / 10); 859 cpu_to_le16(dp_clock / 10);
854 else if (radeon_encoder->pixel_clock > 165000) 860 else if (radeon_encoder->pixel_clock > 165000)
855 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); 861 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
856 else 862 else
@@ -858,13 +864,13 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
858 } 864 }
859 if (ASIC_IS_DCE4(rdev)) { 865 if (ASIC_IS_DCE4(rdev)) {
860 if (is_dp) 866 if (is_dp)
861 args.v3.ucLaneNum = dig_connector->dp_lane_count; 867 args.v3.ucLaneNum = dp_lane_count;
862 else if (radeon_encoder->pixel_clock > 165000) 868 else if (radeon_encoder->pixel_clock > 165000)
863 args.v3.ucLaneNum = 8; 869 args.v3.ucLaneNum = 8;
864 else 870 else
865 args.v3.ucLaneNum = 4; 871 args.v3.ucLaneNum = 4;
866 872
867 if (dig_connector->linkb) { 873 if (dig->linkb) {
868 args.v3.acConfig.ucLinkSel = 1; 874 args.v3.acConfig.ucLinkSel = 1;
869 args.v3.acConfig.ucEncoderSel = 1; 875 args.v3.acConfig.ucEncoderSel = 1;
870 } 876 }
@@ -904,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
904 } 910 }
905 } else if (ASIC_IS_DCE32(rdev)) { 911 } else if (ASIC_IS_DCE32(rdev)) {
906 args.v2.acConfig.ucEncoderSel = dig->dig_encoder; 912 args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
907 if (dig_connector->linkb) 913 if (dig->linkb)
908 args.v2.acConfig.ucLinkSel = 1; 914 args.v2.acConfig.ucLinkSel = 1;
909 915
910 switch (radeon_encoder->encoder_id) { 916 switch (radeon_encoder->encoder_id) {
@@ -938,23 +944,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
938 if ((rdev->flags & RADEON_IS_IGP) && 944 if ((rdev->flags & RADEON_IS_IGP) &&
939 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { 945 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
940 if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { 946 if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
941 if (dig_connector->igp_lane_info & 0x1) 947 if (igp_lane_info & 0x1)
942 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; 948 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
943 else if (dig_connector->igp_lane_info & 0x2) 949 else if (igp_lane_info & 0x2)
944 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; 950 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
945 else if (dig_connector->igp_lane_info & 0x4) 951 else if (igp_lane_info & 0x4)
946 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; 952 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
947 else if (dig_connector->igp_lane_info & 0x8) 953 else if (igp_lane_info & 0x8)
948 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; 954 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
949 } else { 955 } else {
950 if (dig_connector->igp_lane_info & 0x3) 956 if (igp_lane_info & 0x3)
951 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; 957 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
952 else if (dig_connector->igp_lane_info & 0xc) 958 else if (igp_lane_info & 0xc)
953 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; 959 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
954 } 960 }
955 } 961 }
956 962
957 if (dig_connector->linkb) 963 if (dig->linkb)
958 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; 964 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
959 else 965 else
960 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; 966 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
@@ -1072,8 +1078,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1072 if (is_dig) { 1078 if (is_dig) {
1073 switch (mode) { 1079 switch (mode) {
1074 case DRM_MODE_DPMS_ON: 1080 case DRM_MODE_DPMS_ON:
1075 if (!ASIC_IS_DCE4(rdev)) 1081 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1076 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1077 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1082 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1078 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1083 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1079 1084
@@ -1085,8 +1090,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1085 case DRM_MODE_DPMS_STANDBY: 1090 case DRM_MODE_DPMS_STANDBY:
1086 case DRM_MODE_DPMS_SUSPEND: 1091 case DRM_MODE_DPMS_SUSPEND:
1087 case DRM_MODE_DPMS_OFF: 1092 case DRM_MODE_DPMS_OFF:
1088 if (!ASIC_IS_DCE4(rdev)) 1093 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1089 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1090 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1094 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1091 if (ASIC_IS_DCE4(rdev)) 1095 if (ASIC_IS_DCE4(rdev))
1092 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); 1096 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
@@ -1290,24 +1294,22 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1290 uint32_t dig_enc_in_use = 0; 1294 uint32_t dig_enc_in_use = 0;
1291 1295
1292 if (ASIC_IS_DCE4(rdev)) { 1296 if (ASIC_IS_DCE4(rdev)) {
1293 struct radeon_connector_atom_dig *dig_connector = 1297 dig = radeon_encoder->enc_priv;
1294 radeon_get_atom_connector_priv_from_encoder(encoder);
1295
1296 switch (radeon_encoder->encoder_id) { 1298 switch (radeon_encoder->encoder_id) {
1297 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1299 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1298 if (dig_connector->linkb) 1300 if (dig->linkb)
1299 return 1; 1301 return 1;
1300 else 1302 else
1301 return 0; 1303 return 0;
1302 break; 1304 break;
1303 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1305 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1304 if (dig_connector->linkb) 1306 if (dig->linkb)
1305 return 3; 1307 return 3;
1306 else 1308 else
1307 return 2; 1309 return 2;
1308 break; 1310 break;
1309 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1311 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1310 if (dig_connector->linkb) 1312 if (dig->linkb)
1311 return 5; 1313 return 5;
1312 else 1314 else
1313 return 4; 1315 return 4;
@@ -1641,6 +1643,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
1641struct radeon_encoder_atom_dig * 1643struct radeon_encoder_atom_dig *
1642radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) 1644radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
1643{ 1645{
1646 int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
1644 struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); 1647 struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
1645 1648
1646 if (!dig) 1649 if (!dig)
@@ -1650,11 +1653,16 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
1650 dig->coherent_mode = true; 1653 dig->coherent_mode = true;
1651 dig->dig_encoder = -1; 1654 dig->dig_encoder = -1;
1652 1655
1656 if (encoder_enum == 2)
1657 dig->linkb = true;
1658 else
1659 dig->linkb = false;
1660
1653 return dig; 1661 return dig;
1654} 1662}
1655 1663
1656void 1664void
1657radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) 1665radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
1658{ 1666{
1659 struct radeon_device *rdev = dev->dev_private; 1667 struct radeon_device *rdev = dev->dev_private;
1660 struct drm_encoder *encoder; 1668 struct drm_encoder *encoder;
@@ -1663,7 +1671,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1663 /* see if we already added it */ 1671 /* see if we already added it */
1664 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 1672 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1665 radeon_encoder = to_radeon_encoder(encoder); 1673 radeon_encoder = to_radeon_encoder(encoder);
1666 if (radeon_encoder->encoder_id == encoder_id) { 1674 if (radeon_encoder->encoder_enum == encoder_enum) {
1667 radeon_encoder->devices |= supported_device; 1675 radeon_encoder->devices |= supported_device;
1668 return; 1676 return;
1669 } 1677 }
@@ -1691,7 +1699,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1691 1699
1692 radeon_encoder->enc_priv = NULL; 1700 radeon_encoder->enc_priv = NULL;
1693 1701
1694 radeon_encoder->encoder_id = encoder_id; 1702 radeon_encoder->encoder_enum = encoder_enum;
1703 radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
1695 radeon_encoder->devices = supported_device; 1704 radeon_encoder->devices = supported_device;
1696 radeon_encoder->rmx_type = RMX_OFF; 1705 radeon_encoder->rmx_type = RMX_OFF;
1697 radeon_encoder->underscan_type = UNDERSCAN_OFF; 1706 radeon_encoder->underscan_type = UNDERSCAN_OFF;
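
The rename from encoder_id to encoder_enum reflects that the full ATOM graphics-object identifier is now kept on the encoder: the object id (which encoder block) and the enum id (which instance of that block) are packed into one value, the object id is recovered with (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT, and an enum id of 2 means the second instance, i.e. link B of a UNIPHY pair. That is why radeon_atombios_set_dig_info() can now derive linkb from the encoder itself instead of reading it from the connector. A small packing/unpacking sketch; the mask and shift constants here are illustrative stand-ins for the driver's ObjectID.h definitions, not the real values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout: low byte = object id, next bits = enum id. */
#define OBJ_ID_MASK   0x00ffu
#define OBJ_ID_SHIFT  0
#define ENUM_ID_MASK  0x0700u
#define ENUM_ID_SHIFT 8

static uint32_t make_enum(uint32_t object_id, uint32_t enum_id)
{
    return ((object_id << OBJ_ID_SHIFT) & OBJ_ID_MASK) |
           ((enum_id << ENUM_ID_SHIFT) & ENUM_ID_MASK);
}

int main(void)
{
    uint32_t uniphy = 0x21;                        /* illustrative object id */
    uint32_t encoder_enum = make_enum(uniphy, 2);  /* second instance of it */

    uint32_t encoder_id = (encoder_enum & OBJ_ID_MASK) >> OBJ_ID_SHIFT;
    uint32_t instance   = (encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;

    printf("enum 0x%04x -> object id 0x%02x, instance %u, linkb=%d\n",
           (unsigned)encoder_enum, (unsigned)encoder_id,
           (unsigned)instance, instance == 2);
    return 0;
}
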
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index dbf86962bdd1..40b0c087b592 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -94,6 +94,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
94 ret = radeon_bo_reserve(rbo, false); 94 ret = radeon_bo_reserve(rbo, false);
95 if (likely(ret == 0)) { 95 if (likely(ret == 0)) {
96 radeon_bo_kunmap(rbo); 96 radeon_bo_kunmap(rbo);
97 radeon_bo_unpin(rbo);
97 radeon_bo_unreserve(rbo); 98 radeon_bo_unreserve(rbo);
98 } 99 }
99 drm_gem_object_unreference_unlocked(gobj); 100 drm_gem_object_unreference_unlocked(gobj);
@@ -118,7 +119,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
118 aligned_size = ALIGN(size, PAGE_SIZE); 119 aligned_size = ALIGN(size, PAGE_SIZE);
119 ret = radeon_gem_object_create(rdev, aligned_size, 0, 120 ret = radeon_gem_object_create(rdev, aligned_size, 0,
120 RADEON_GEM_DOMAIN_VRAM, 121 RADEON_GEM_DOMAIN_VRAM,
121 false, ttm_bo_type_kernel, 122 false, true,
122 &gobj); 123 &gobj);
123 if (ret) { 124 if (ret) {
124 printk(KERN_ERR "failed to allocate framebuffer (%d)\n", 125 printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
@@ -325,8 +326,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
325{ 326{
326 struct fb_info *info; 327 struct fb_info *info;
327 struct radeon_framebuffer *rfb = &rfbdev->rfb; 328 struct radeon_framebuffer *rfb = &rfbdev->rfb;
328 struct radeon_bo *rbo;
329 int r;
330 329
331 if (rfbdev->helper.fbdev) { 330 if (rfbdev->helper.fbdev) {
332 info = rfbdev->helper.fbdev; 331 info = rfbdev->helper.fbdev;
@@ -338,14 +337,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
338 } 337 }
339 338
340 if (rfb->obj) { 339 if (rfb->obj) {
341 rbo = rfb->obj->driver_private; 340 radeonfb_destroy_pinned_object(rfb->obj);
342 r = radeon_bo_reserve(rbo, false); 341 rfb->obj = NULL;
343 if (likely(r == 0)) {
344 radeon_bo_kunmap(rbo);
345 radeon_bo_unpin(rbo);
346 radeon_bo_unreserve(rbo);
347 }
348 drm_gem_object_unreference_unlocked(rfb->obj);
349 } 342 }
350 drm_fb_helper_fini(&rfbdev->helper); 343 drm_fb_helper_fini(&rfbdev->helper);
351 drm_framebuffer_cleanup(&rfb->base); 344 drm_framebuffer_cleanup(&rfb->base);
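
Framebuffer teardown above now funnels through radeonfb_destroy_pinned_object(): reserve the buffer, drop the kernel mapping, unpin it, release the reservation, and only then drop the GEM reference. Previously radeon_fbdev_destroy() open-coded that sequence while the shared helper skipped the unpin. A self-contained sketch of the ordering, with trivial stand-ins for radeon_bo_reserve()/kunmap()/unpin()/unreserve() and the final unreference:

#include <stdio.h>

struct buffer { int pinned, mapped, refs; };

static int  lock(struct buffer *b)   { (void)b; return 0; }  /* reserve */
static void unlock(struct buffer *b) { (void)b; }            /* unreserve */
static void unmap(struct buffer *b)  { b->mapped = 0; }
static void unpin(struct buffer *b)  { b->pinned = 0; }
static void put(struct buffer *b)    { b->refs--; }

/* Mirror of the helper's order: reserve, unmap, unpin, unreserve,
 * then drop the reference last. */
static void destroy_pinned_object(struct buffer *b)
{
    if (lock(b) == 0) {
        unmap(b);
        unpin(b);
        unlock(b);
    }
    put(b);
}

int main(void)
{
    struct buffer fb = { .pinned = 1, .mapped = 1, .refs = 1 };
    destroy_pinned_object(&fb);
    printf("pinned=%d mapped=%d refs=%d\n", fb.pinned, fb.mapped, fb.refs);
    return 0;
}
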
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c578f265b24c..d1e595d91723 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
201 return r; 201 return r;
202 } 202 }
203 r = drm_gem_handle_create(filp, gobj, &handle); 203 r = drm_gem_handle_create(filp, gobj, &handle);
204 /* drop reference from allocate - handle holds it now */
205 drm_gem_object_unreference_unlocked(gobj);
204 if (r) { 206 if (r) {
205 drm_gem_object_unreference_unlocked(gobj);
206 return r; 207 return r;
207 } 208 }
208 drm_gem_object_handle_unreference_unlocked(gobj);
209 args->handle = handle; 209 args->handle = handle;
210 return 0; 210 return 0;
211} 211}
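
The reordering above follows the usual GEM reference pattern spelled out in the new comment: object creation leaves the caller holding one reference, drm_gem_handle_create() takes its own reference for the handle, so the creation reference can be dropped unconditionally right after the handle call; if handle creation failed, that drop is also the last reference and frees the object. A reference-count sketch of the same flow with plain integers, not the real DRM objects:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static struct obj *obj_create(void)          /* caller gets one reference */
{
    struct obj *o = malloc(sizeof(*o));
    if (!o)
        exit(1);
    o->refs = 1;
    return o;
}

static void obj_unref(struct obj *o)
{
    if (--o->refs == 0) {
        printf("object freed\n");
        free(o);
    }
}

static int handle_create(struct obj *o, bool fail)
{
    if (fail)
        return -1;      /* no reference taken on failure */
    o->refs++;          /* the handle now owns a reference */
    return 0;
}

int main(void)
{
    struct obj *o = obj_create();
    int r = handle_create(o, false);

    obj_unref(o);       /* drop the creation reference unconditionally */
    if (r)              /* on failure the drop above already freed it */
        return 1;
    printf("handle holds the remaining reference (refs=%d)\n", o->refs);
    return 0;
}
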
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index bfd2ce5f5372..6a13ee38a5b9 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -99,6 +99,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
99 } 99 }
100 } 100 }
101 101
102 /* switch the pads to ddc mode */
103 if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
104 temp = RREG32(rec->mask_clk_reg);
105 temp &= ~(1 << 16);
106 WREG32(rec->mask_clk_reg, temp);
107 }
108
102 /* clear the output pin values */ 109 /* clear the output pin values */
103 temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask; 110 temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
104 WREG32(rec->a_clk_reg, temp); 111 WREG32(rec->a_clk_reg, temp);
@@ -206,7 +213,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap)
206 213
207static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) 214static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
208{ 215{
209 u32 sclk = radeon_get_engine_clock(rdev); 216 u32 sclk = rdev->pm.current_sclk;
210 u32 prescale = 0; 217 u32 prescale = 0;
211 u32 nm; 218 u32 nm;
212 u8 n, m, loop; 219 u8 n, m, loop;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 059bfa4098d7..a108c7ed14f5 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -121,11 +121,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
121 * chips. Disable MSI on them for now. 121 * chips. Disable MSI on them for now.
122 */ 122 */
123 if ((rdev->family >= CHIP_RV380) && 123 if ((rdev->family >= CHIP_RV380) &&
124 (!(rdev->flags & RADEON_IS_IGP))) { 124 (!(rdev->flags & RADEON_IS_IGP)) &&
125 (!(rdev->flags & RADEON_IS_AGP))) {
125 int ret = pci_enable_msi(rdev->pdev); 126 int ret = pci_enable_msi(rdev->pdev);
126 if (!ret) { 127 if (!ret) {
127 rdev->msi_enabled = 1; 128 rdev->msi_enabled = 1;
128 DRM_INFO("radeon: using MSI.\n"); 129 dev_info(rdev->dev, "radeon: using MSI.\n");
129 } 130 }
130 } 131 }
131 rdev->irq.installed = true; 132 rdev->irq.installed = true;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index b1c8ace5f080..8fbbe1c6ebbd 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -161,6 +161,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
161 DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); 161 DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
162 return -EINVAL; 162 return -EINVAL;
163 } 163 }
164 break;
164 case RADEON_INFO_WANT_HYPERZ: 165 case RADEON_INFO_WANT_HYPERZ:
165 /* The "value" here is both an input and output parameter. 166 /* The "value" here is both an input and output parameter.
166 * If the input value is 1, filp requests hyper-z access. 167 * If the input value is 1, filp requests hyper-z access.
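
The added break closes a switch fall-through: without it, a tiling-config query that passes its r6xx+ check would continue straight into the RADEON_INFO_WANT_HYPERZ handling below it. A tiny self-contained illustration of the hazard:

#include <stdio.h>

/* Without a break, C switch cases fall through into the next label. */
static int handle(int request)
{
    int value = 0;

    switch (request) {
    case 1:
        value = 10;
        /* missing break here: case 2 runs as well */
    case 2:
        value += 100;
        break;
    }
    return value;
}

int main(void)
{
    printf("request 1 -> %d (expected 10)\n", handle(1));   /* prints 110 */
    printf("request 2 -> %d\n", handle(2));                 /* prints 100 */
    return 0;
}
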
@@ -202,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
202 */ 203 */
203int radeon_driver_firstopen_kms(struct drm_device *dev) 204int radeon_driver_firstopen_kms(struct drm_device *dev)
204{ 205{
206 struct radeon_device *rdev = dev->dev_private;
207
208 if (rdev->powered_down)
209 return -EINVAL;
205 return 0; 210 return 0;
206} 211}
207 212
@@ -323,45 +328,45 @@ KMS_INVALID_IOCTL(radeon_surface_free_kms)
323 328
324 329
325struct drm_ioctl_desc radeon_ioctls_kms[] = { 330struct drm_ioctl_desc radeon_ioctls_kms[] = {
326 DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 331 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
327 DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 332 DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
328 DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 333 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
329 DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 334 DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
330 DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH), 335 DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
331 DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH), 336 DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
332 DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH), 337 DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
333 DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH), 338 DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
334 DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH), 339 DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
335 DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH), 340 DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
336 DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH), 341 DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
337 DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH), 342 DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
338 DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH), 343 DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
339 DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH), 344 DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
340 DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 345 DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
341 DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH), 346 DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
342 DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH), 347 DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
343 DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH), 348 DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
344 DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH), 349 DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
345 DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH), 350 DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
346 DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH), 351 DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
347 DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 352 DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
348 DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH), 353 DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
349 DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH), 354 DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
350 DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH), 355 DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
351 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), 356 DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
352 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), 357 DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
353 /* KMS */ 358 /* KMS */
354 DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED), 359 DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
355 DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED), 360 DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
356 DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED), 361 DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
357 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED), 362 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
358 DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), 363 DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
359 DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), 364 DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
360 DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED), 365 DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
361 DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED), 366 DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
362 DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED), 367 DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
363 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), 368 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
364 DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), 369 DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
365 DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), 370 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
366}; 371};
367int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); 372int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 989df519a1e4..305049afde15 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
272 if (!ref_div) 272 if (!ref_div)
273 return 1; 273 return 1;
274 274
275 vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div; 275 vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
276 276
277 /* 277 /*
278 * This is horribly crude: the VCO frequency range is divided into 278 * This is horribly crude: the VCO frequency range is divided into
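
With the bitwise AND, the intermediate value bore no relation to the actual VCO frequency. For an illustrative 27.00 MHz reference clock (ref_freq = 2700 in 10 kHz units), fb_div = 138 and ref_div = 6, the old expression gives (2700 & 138) / 6 = 136 / 6 = 22, while the intended (2700 * 138) / 6 = 62100, i.e. a 621 MHz VCO, which is the kind of value the crude VCO-range classification described in the comment below is meant to bucket.
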
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index b8149cbc0c70..0b8397000f4c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1345,7 +1345,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra
1345} 1345}
1346 1346
1347void 1347void
1348radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) 1348radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
1349{ 1349{
1350 struct radeon_device *rdev = dev->dev_private; 1350 struct radeon_device *rdev = dev->dev_private;
1351 struct drm_encoder *encoder; 1351 struct drm_encoder *encoder;
@@ -1354,7 +1354,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
1354 /* see if we already added it */ 1354 /* see if we already added it */
1355 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 1355 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1356 radeon_encoder = to_radeon_encoder(encoder); 1356 radeon_encoder = to_radeon_encoder(encoder);
1357 if (radeon_encoder->encoder_id == encoder_id) { 1357 if (radeon_encoder->encoder_enum == encoder_enum) {
1358 radeon_encoder->devices |= supported_device; 1358 radeon_encoder->devices |= supported_device;
1359 return; 1359 return;
1360 } 1360 }
@@ -1374,7 +1374,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
1374 1374
1375 radeon_encoder->enc_priv = NULL; 1375 radeon_encoder->enc_priv = NULL;
1376 1376
1377 radeon_encoder->encoder_id = encoder_id; 1377 radeon_encoder->encoder_enum = encoder_enum;
1378 radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
1378 radeon_encoder->devices = supported_device; 1379 radeon_encoder->devices = supported_device;
1379 radeon_encoder->rmx_type = RMX_OFF; 1380 radeon_encoder->rmx_type = RMX_OFF;
1380 1381
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5bbc086b9267..17a6602b5885 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -204,7 +204,7 @@ struct radeon_i2c_chan {
204 204
205/* mostly for macs, but really any system without connector tables */ 205/* mostly for macs, but really any system without connector tables */
206enum radeon_connector_table { 206enum radeon_connector_table {
207 CT_NONE, 207 CT_NONE = 0,
208 CT_GENERIC, 208 CT_GENERIC,
209 CT_IBOOK, 209 CT_IBOOK,
210 CT_POWERBOOK_EXTERNAL, 210 CT_POWERBOOK_EXTERNAL,
@@ -215,6 +215,7 @@ enum radeon_connector_table {
215 CT_IMAC_G5_ISIGHT, 215 CT_IMAC_G5_ISIGHT,
216 CT_EMAC, 216 CT_EMAC,
217 CT_RN50_POWER, 217 CT_RN50_POWER,
218 CT_MAC_X800,
218}; 219};
219 220
220enum radeon_dvo_chip { 221enum radeon_dvo_chip {
@@ -342,6 +343,7 @@ struct radeon_atom_ss {
342}; 343};
343 344
344struct radeon_encoder_atom_dig { 345struct radeon_encoder_atom_dig {
346 bool linkb;
345 /* atom dig */ 347 /* atom dig */
346 bool coherent_mode; 348 bool coherent_mode;
347 int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */ 349 int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
@@ -360,6 +362,7 @@ struct radeon_encoder_atom_dac {
360 362
361struct radeon_encoder { 363struct radeon_encoder {
362 struct drm_encoder base; 364 struct drm_encoder base;
365 uint32_t encoder_enum;
363 uint32_t encoder_id; 366 uint32_t encoder_id;
364 uint32_t devices; 367 uint32_t devices;
365 uint32_t active_device; 368 uint32_t active_device;
@@ -378,7 +381,6 @@ struct radeon_encoder {
378 381
379struct radeon_connector_atom_dig { 382struct radeon_connector_atom_dig {
380 uint32_t igp_lane_info; 383 uint32_t igp_lane_info;
381 bool linkb;
382 /* displayport */ 384 /* displayport */
383 struct radeon_i2c_chan *dp_i2c_bus; 385 struct radeon_i2c_chan *dp_i2c_bus;
384 u8 dpcd[8]; 386 u8 dpcd[8];
@@ -599,7 +601,6 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d
599void radeon_enc_destroy(struct drm_encoder *encoder); 601void radeon_enc_destroy(struct drm_encoder *encoder);
600void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); 602void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
601void radeon_combios_asic_init(struct drm_device *dev); 603void radeon_combios_asic_init(struct drm_device *dev);
602extern int radeon_static_clocks_init(struct drm_device *dev);
603bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, 604bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
604 struct drm_display_mode *mode, 605 struct drm_display_mode *mode,
605 struct drm_display_mode *adjusted_mode); 606 struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0afd1e62347d..b3b5306bb578 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -69,7 +69,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
69 u32 c = 0; 69 u32 c = 0;
70 70
71 rbo->placement.fpfn = 0; 71 rbo->placement.fpfn = 0;
72 rbo->placement.lpfn = 0; 72 rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT;
73 rbo->placement.placement = rbo->placements; 73 rbo->placement.placement = rbo->placements;
74 rbo->placement.busy_placement = rbo->placements; 74 rbo->placement.busy_placement = rbo->placements;
75 if (domain & RADEON_GEM_DOMAIN_VRAM) 75 if (domain & RADEON_GEM_DOMAIN_VRAM)
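
Setting lpfn, the highest page frame TTM may use for placement, to active_vram_size >> PAGE_SHIFT keeps new buffer objects inside the VRAM the driver currently exposes instead of leaving the upper bound unlimited (lpfn = 0 means no limit in TTM). As an illustrative calculation, with 256 MiB of active VRAM and 4 KiB pages (PAGE_SHIFT = 12), lpfn = (256 << 20) >> 12 = 65536, so placements are confined to the first 64 K pages of VRAM.
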
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 353998dc2c03..3481bc7f6f58 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -124,11 +124,8 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
124 int r; 124 int r;
125 125
126 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 126 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
127 if (unlikely(r != 0)) { 127 if (unlikely(r != 0))
128 if (r != -ERESTARTSYS)
129 dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
130 return r; 128 return r;
131 }
132 spin_lock(&bo->tbo.lock); 129 spin_lock(&bo->tbo.lock);
133 if (mem_type) 130 if (mem_type)
134 *mem_type = bo->tbo.mem.mem_type; 131 *mem_type = bo->tbo.mem.mem_type;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 58038f5cab38..f87efec76236 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -226,6 +226,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
226{ 226{
227 int i; 227 int i;
228 228
229 /* no need to take locks, etc. if nothing's going to change */
230 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
231 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
232 return;
233
229 mutex_lock(&rdev->ddev->struct_mutex); 234 mutex_lock(&rdev->ddev->struct_mutex);
230 mutex_lock(&rdev->vram_mutex); 235 mutex_lock(&rdev->vram_mutex);
231 mutex_lock(&rdev->cp.mutex); 236 mutex_lock(&rdev->cp.mutex);
@@ -632,8 +637,6 @@ void radeon_pm_fini(struct radeon_device *rdev)
632 } 637 }
633 638
634 radeon_hwmon_fini(rdev); 639 radeon_hwmon_fini(rdev);
635 if (rdev->pm.i2c_bus)
636 radeon_i2c_destroy(rdev->pm.i2c_bus);
637} 640}
638 641
639void radeon_pm_compute_clocks(struct radeon_device *rdev) 642void radeon_pm_compute_clocks(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index b3ba44c0a818..4ae5a3d1074e 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -3228,34 +3228,34 @@ void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
3228} 3228}
3229 3229
3230struct drm_ioctl_desc radeon_ioctls[] = { 3230struct drm_ioctl_desc radeon_ioctls[] = {
3231 DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3231 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3232 DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3232 DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3233 DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3233 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3234 DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3234 DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3235 DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH), 3235 DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
3236 DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH), 3236 DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
3237 DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH), 3237 DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
3238 DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH), 3238 DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
3239 DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH), 3239 DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
3240 DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH), 3240 DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
3241 DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH), 3241 DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
3242 DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH), 3242 DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
3243 DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH), 3243 DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
3244 DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH), 3244 DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
3245 DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3245 DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3246 DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH), 3246 DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
3247 DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH), 3247 DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
3248 DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH), 3248 DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
3249 DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH), 3249 DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
3250 DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH), 3250 DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
3251 DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH), 3251 DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH),
3252 DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3252 DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3253 DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH), 3253 DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
3254 DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), 3254 DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
3255 DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), 3255 DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
3256 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), 3256 DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
3257 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH), 3257 DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
3258 DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH) 3258 DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
3259}; 3259};
3260 3260
3261int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); 3261int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
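
This table, like the savage, sis and via tables further down, switches from DRM_IOCTL_DEF to a driver-relative DRM_IOCTL_DEF_DRV macro. Judging by the reworked VMW_IOCTL_DEF later in this diff, the slot is indexed by the command number minus the driver command base and the entry keeps the full ioctl number next to the handler, which lets the dispatcher cross-check the command it receives (the vmwgfx handler now compares ioctl->cmd_drv against cmd). A simplified, self-contained model of that indexing scheme, with made-up command numbers, looks like:

#include <stdio.h>

#define CMD_BASE 0x40                       /* stand-in for DRM_COMMAND_BASE */
#define IOCTL_DEF(nr, fn) [(nr) - CMD_BASE] = { (nr), (fn) }

typedef int (*ioctl_fn)(void *data);

struct ioctl_desc { unsigned int cmd; ioctl_fn func; };

static int do_init(void *data)  { (void)data; return 0; }
static int do_reset(void *data) { (void)data; return 0; }

static const struct ioctl_desc ioctls[] = {
	IOCTL_DEF(0x40, do_init),
	IOCTL_DEF(0x41, do_reset),
};

int main(void)
{
	unsigned int cmd = 0x41;                 /* as received from user space */
	const struct ioctl_desc *d = &ioctls[cmd - CMD_BASE];

	/* Reject calls whose stored command number does not match, in the
	 * spirit of the new ioctl->cmd_drv != cmd check. */
	if (d->cmd != cmd)
		return 1;
	return d->func(NULL);
}
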
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index cc05b230d7ef..51d5f7b5ab21 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -693,6 +693,7 @@ void rs600_mc_init(struct radeon_device *rdev)
693 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 693 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
694 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 694 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
695 rdev->mc.visible_vram_size = rdev->mc.aper_size; 695 rdev->mc.visible_vram_size = rdev->mc.aper_size;
696 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
696 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 697 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
697 base = RREG32_MC(R_000004_MC_FB_LOCATION); 698 base = RREG32_MC(R_000004_MC_FB_LOCATION);
698 base = G_000004_MC_FB_START(base) << 16; 699 base = G_000004_MC_FB_START(base) << 16;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3e3f75718be3..4dc2a87ea680 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -157,6 +157,7 @@ void rs690_mc_init(struct radeon_device *rdev)
157 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 157 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
158 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 158 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
159 rdev->mc.visible_vram_size = rdev->mc.aper_size; 159 rdev->mc.visible_vram_size = rdev->mc.aper_size;
160 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
160 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); 161 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
161 base = G_000100_MC_FB_START(base) << 16; 162 base = G_000100_MC_FB_START(base) << 16;
162 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 163 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index f1c796810117..9490da700749 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -267,6 +267,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
267 */ 267 */
268void r700_cp_stop(struct radeon_device *rdev) 268void r700_cp_stop(struct radeon_device *rdev)
269{ 269{
270 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
270 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 271 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
271} 272}
272 273
@@ -905,6 +906,54 @@ static void rv770_gpu_init(struct radeon_device *rdev)
905 906
906} 907}
907 908
909static int rv770_vram_scratch_init(struct radeon_device *rdev)
910{
911 int r;
912 u64 gpu_addr;
913
914 if (rdev->vram_scratch.robj == NULL) {
915 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
916 true, RADEON_GEM_DOMAIN_VRAM,
917 &rdev->vram_scratch.robj);
918 if (r) {
919 return r;
920 }
921 }
922
923 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
924 if (unlikely(r != 0))
925 return r;
926 r = radeon_bo_pin(rdev->vram_scratch.robj,
927 RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
928 if (r) {
929 radeon_bo_unreserve(rdev->vram_scratch.robj);
930 return r;
931 }
932 r = radeon_bo_kmap(rdev->vram_scratch.robj,
933 (void **)&rdev->vram_scratch.ptr);
934 if (r)
935 radeon_bo_unpin(rdev->vram_scratch.robj);
936 radeon_bo_unreserve(rdev->vram_scratch.robj);
937
938 return r;
939}
940
941static void rv770_vram_scratch_fini(struct radeon_device *rdev)
942{
943 int r;
944
945 if (rdev->vram_scratch.robj == NULL) {
946 return;
947 }
948 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
949 if (likely(r == 0)) {
950 radeon_bo_kunmap(rdev->vram_scratch.robj);
951 radeon_bo_unpin(rdev->vram_scratch.robj);
952 radeon_bo_unreserve(rdev->vram_scratch.robj);
953 }
954 radeon_bo_unref(&rdev->vram_scratch.robj);
955}
956
908int rv770_mc_init(struct radeon_device *rdev) 957int rv770_mc_init(struct radeon_device *rdev)
909{ 958{
910 u32 tmp; 959 u32 tmp;
@@ -944,6 +993,7 @@ int rv770_mc_init(struct radeon_device *rdev)
944 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 993 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
945 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 994 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
946 rdev->mc.visible_vram_size = rdev->mc.aper_size; 995 rdev->mc.visible_vram_size = rdev->mc.aper_size;
996 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
947 r600_vram_gtt_location(rdev, &rdev->mc); 997 r600_vram_gtt_location(rdev, &rdev->mc);
948 radeon_update_bandwidth_info(rdev); 998 radeon_update_bandwidth_info(rdev);
949 999
@@ -970,6 +1020,9 @@ static int rv770_startup(struct radeon_device *rdev)
970 if (r) 1020 if (r)
971 return r; 1021 return r;
972 } 1022 }
1023 r = rv770_vram_scratch_init(rdev);
1024 if (r)
1025 return r;
973 rv770_gpu_init(rdev); 1026 rv770_gpu_init(rdev);
974 r = r600_blit_init(rdev); 1027 r = r600_blit_init(rdev);
975 if (r) { 1028 if (r) {
@@ -1023,11 +1076,6 @@ int rv770_resume(struct radeon_device *rdev)
1023 */ 1076 */
1024 /* post card */ 1077 /* post card */
1025 atom_asic_init(rdev->mode_info.atom_context); 1078 atom_asic_init(rdev->mode_info.atom_context);
1026 /* Initialize clocks */
1027 r = radeon_clocks_init(rdev);
1028 if (r) {
1029 return r;
1030 }
1031 1079
1032 r = rv770_startup(rdev); 1080 r = rv770_startup(rdev);
1033 if (r) { 1081 if (r) {
@@ -1118,9 +1166,6 @@ int rv770_init(struct radeon_device *rdev)
1118 radeon_surface_init(rdev); 1166 radeon_surface_init(rdev);
1119 /* Initialize clocks */ 1167 /* Initialize clocks */
1120 radeon_get_clock_info(rdev->ddev); 1168 radeon_get_clock_info(rdev->ddev);
1121 r = radeon_clocks_init(rdev);
1122 if (r)
1123 return r;
1124 /* Fence driver */ 1169 /* Fence driver */
1125 r = radeon_fence_driver_init(rdev); 1170 r = radeon_fence_driver_init(rdev);
1126 if (r) 1171 if (r)
@@ -1195,9 +1240,9 @@ void rv770_fini(struct radeon_device *rdev)
1195 r600_irq_fini(rdev); 1240 r600_irq_fini(rdev);
1196 radeon_irq_kms_fini(rdev); 1241 radeon_irq_kms_fini(rdev);
1197 rv770_pcie_gart_fini(rdev); 1242 rv770_pcie_gart_fini(rdev);
1243 rv770_vram_scratch_fini(rdev);
1198 radeon_gem_fini(rdev); 1244 radeon_gem_fini(rdev);
1199 radeon_fence_driver_fini(rdev); 1245 radeon_fence_driver_fini(rdev);
1200 radeon_clocks_fini(rdev);
1201 radeon_agp_fini(rdev); 1246 radeon_agp_fini(rdev);
1202 radeon_bo_fini(rdev); 1247 radeon_bo_fini(rdev);
1203 radeon_atombios_fini(rdev); 1248 radeon_atombios_fini(rdev);
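
rv770_vram_scratch_init()/fini() follow the usual TTM buffer-object lifecycle: create once, then reserve, pin into VRAM, kmap and unreserve, unwinding the pin when the map fails; fini reverses the steps before dropping the reference. The same acquire-in-order, release-in-reverse ladder, boiled down to a standalone sketch with stub resources, is:

#include <stdio.h>

/* Stubs standing in for create/reserve/pin/kmap; each may fail. */
static int create(void)  { return 0; }
static int reserve(void) { return 0; }
static int pin(void)     { return 0; }
static int kmap(void)    { return 0; }
static void unpin(void)     { puts("unpin"); }
static void unreserve(void) { puts("unreserve"); }

static int scratch_init(void)
{
	int r;

	if ((r = create()))
		return r;
	if ((r = reserve()))
		return r;
	if ((r = pin())) {
		unreserve();          /* unwind the reserve on pin failure */
		return r;
	}
	r = kmap();
	if (r)
		unpin();              /* keep the object but drop the pin */
	unreserve();                  /* always release the reservation */
	return r;
}

int main(void) { return scratch_init(); }
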
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 976dc8d25280..bf5f83ea14fe 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1082,10 +1082,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
1082} 1082}
1083 1083
1084struct drm_ioctl_desc savage_ioctls[] = { 1084struct drm_ioctl_desc savage_ioctls[] = {
1085 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1085 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1086 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), 1086 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
1087 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), 1087 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
1088 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), 1088 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
1089}; 1089};
1090 1090
1091int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); 1091int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 07d0f2979cac..7fe2b63412ce 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -320,12 +320,12 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
320} 320}
321 321
322struct drm_ioctl_desc sis_ioctls[] = { 322struct drm_ioctl_desc sis_ioctls[] = {
323 DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), 323 DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
324 DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH), 324 DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
325 DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), 325 DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
326 DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH), 326 DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
327 DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH), 327 DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
328 DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), 328 DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
329}; 329};
330 330
331int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); 331int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb4cf7ef4d1e..db809e034cc4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -442,6 +442,43 @@ out_err:
442} 442}
443 443
444/** 444/**
445 * Call with bo::reserved held and with the lru lock held.
446 * Will release GPU memory type usage on destruction.
447 * This is the place to put in driver specific hooks.
448 * Will release the bo::reserved lock and the
449 * lru lock on exit.
450 */
451
452static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
453{
454 struct ttm_bo_global *glob = bo->glob;
455
456 if (bo->ttm) {
457
458 /**
459 * Release the lru_lock, since we don't want to have
460 * an atomic requirement on ttm_tt[unbind|destroy].
461 */
462
463 spin_unlock(&glob->lru_lock);
464 ttm_tt_unbind(bo->ttm);
465 ttm_tt_destroy(bo->ttm);
466 bo->ttm = NULL;
467 spin_lock(&glob->lru_lock);
468 }
469
470 if (bo->mem.mm_node) {
471 drm_mm_put_block(bo->mem.mm_node);
472 bo->mem.mm_node = NULL;
473 }
474
475 atomic_set(&bo->reserved, 0);
476 wake_up_all(&bo->event_queue);
477 spin_unlock(&glob->lru_lock);
478}
479
480
481/**
445 * If bo idle, remove from delayed- and lru lists, and unref. 482 * If bo idle, remove from delayed- and lru lists, and unref.
446 * If not idle, and already on delayed list, do nothing. 483 * If not idle, and already on delayed list, do nothing.
447 * If not idle, and not on delayed list, put on delayed list, 484 * If not idle, and not on delayed list, put on delayed list,
@@ -456,6 +493,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
456 int ret; 493 int ret;
457 494
458 spin_lock(&bo->lock); 495 spin_lock(&bo->lock);
496retry:
459 (void) ttm_bo_wait(bo, false, false, !remove_all); 497 (void) ttm_bo_wait(bo, false, false, !remove_all);
460 498
461 if (!bo->sync_obj) { 499 if (!bo->sync_obj) {
@@ -464,31 +502,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
464 spin_unlock(&bo->lock); 502 spin_unlock(&bo->lock);
465 503
466 spin_lock(&glob->lru_lock); 504 spin_lock(&glob->lru_lock);
467 put_count = ttm_bo_del_from_lru(bo); 505 ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
506
507 /**
508 * Someone else has the object reserved. Bail and retry.
509 */
468 510
469 ret = ttm_bo_reserve_locked(bo, false, false, false, 0); 511 if (unlikely(ret == -EBUSY)) {
470 BUG_ON(ret); 512 spin_unlock(&glob->lru_lock);
471 if (bo->ttm) 513 spin_lock(&bo->lock);
472 ttm_tt_unbind(bo->ttm); 514 goto requeue;
515 }
516
517 /**
518 * We can re-check for sync object without taking
519 * the bo::lock since setting the sync object requires
520 * also bo::reserved. A busy object at this point may
521 * be caused by another thread starting an accelerated
522 * eviction.
523 */
524
525 if (unlikely(bo->sync_obj)) {
526 atomic_set(&bo->reserved, 0);
527 wake_up_all(&bo->event_queue);
528 spin_unlock(&glob->lru_lock);
529 spin_lock(&bo->lock);
530 if (remove_all)
531 goto retry;
532 else
533 goto requeue;
534 }
535
536 put_count = ttm_bo_del_from_lru(bo);
473 537
474 if (!list_empty(&bo->ddestroy)) { 538 if (!list_empty(&bo->ddestroy)) {
475 list_del_init(&bo->ddestroy); 539 list_del_init(&bo->ddestroy);
476 ++put_count; 540 ++put_count;
477 } 541 }
478 if (bo->mem.mm_node) {
479 drm_mm_put_block(bo->mem.mm_node);
480 bo->mem.mm_node = NULL;
481 }
482 spin_unlock(&glob->lru_lock);
483 542
484 atomic_set(&bo->reserved, 0); 543 ttm_bo_cleanup_memtype_use(bo);
485 544
486 while (put_count--) 545 while (put_count--)
487 kref_put(&bo->list_kref, ttm_bo_ref_bug); 546 kref_put(&bo->list_kref, ttm_bo_ref_bug);
488 547
489 return 0; 548 return 0;
490 } 549 }
491 550requeue:
492 spin_lock(&glob->lru_lock); 551 spin_lock(&glob->lru_lock);
493 if (list_empty(&bo->ddestroy)) { 552 if (list_empty(&bo->ddestroy)) {
494 void *sync_obj = bo->sync_obj; 553 void *sync_obj = bo->sync_obj;
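
The ttm_bo.c rework replaces the unconditional reserve (which BUG()ed on contention) with a try-reserve under the lru lock: if someone else holds the reservation, or a new sync object appears, the destroy path re-queues and retries instead of racing, and the actual teardown of the TT pages and the memory node moves into ttm_bo_cleanup_memtype_use(). A tiny user-space model of the try-lock-then-requeue idea, using a C11 atomic flag in place of bo::reserved and invented names, is sketched here:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag reserved = ATOMIC_FLAG_INIT;   /* models bo::reserved */

static bool try_reserve(void) { return !atomic_flag_test_and_set(&reserved); }
static void release(void)     { atomic_flag_clear(&reserved); }

/* Returns true when the object was torn down, false when the caller
 * should put it back on the delayed-destroy list and try again later. */
static bool cleanup_refs(void)
{
	if (!try_reserve())
		return false;         /* someone else holds it: requeue */

	/* ... unbind the ttm and free the memory node, as
	 * ttm_bo_cleanup_memtype_use() does in the patch ... */
	release();
	return true;
}

int main(void)
{
	printf(cleanup_refs() ? "destroyed\n" : "requeued\n");
	return 0;
}
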
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cffb3e04232..3451a82adba7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
351 INIT_LIST_HEAD(&fbo->lru); 351 INIT_LIST_HEAD(&fbo->lru);
352 INIT_LIST_HEAD(&fbo->swap); 352 INIT_LIST_HEAD(&fbo->swap);
353 fbo->vm_node = NULL; 353 fbo->vm_node = NULL;
354 atomic_set(&fbo->cpu_writers, 0);
354 355
355 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); 356 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
356 kref_init(&fbo->list_kref); 357 kref_init(&fbo->list_kref);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ca904799f018..b1e02fffd3cc 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -69,7 +69,7 @@ struct ttm_page_pool {
69 spinlock_t lock; 69 spinlock_t lock;
70 bool fill_lock; 70 bool fill_lock;
71 struct list_head list; 71 struct list_head list;
72 int gfp_flags; 72 gfp_t gfp_flags;
73 unsigned npages; 73 unsigned npages;
74 char *name; 74 char *name;
75 unsigned long nfrees; 75 unsigned long nfrees;
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
475 * This function is reentrant if caller updates count depending on number of 475 * This function is reentrant if caller updates count depending on number of
476 * pages returned in pages array. 476 * pages returned in pages array.
477 */ 477 */
478static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, 478static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
479 int ttm_flags, enum ttm_caching_state cstate, unsigned count) 479 int ttm_flags, enum ttm_caching_state cstate, unsigned count)
480{ 480{
481 struct page **caching_array; 481 struct page **caching_array;
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
666{ 666{
667 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); 667 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
668 struct page *p = NULL; 668 struct page *p = NULL;
669 int gfp_flags = GFP_USER; 669 gfp_t gfp_flags = GFP_USER;
670 int r; 670 int r;
671 671
672 /* set zero flag for page allocation if required */ 672 /* set zero flag for page allocation if required */
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
818 return 0; 818 return 0;
819} 819}
820 820
821void ttm_page_alloc_fini() 821void ttm_page_alloc_fini(void)
822{ 822{
823 int i; 823 int i;
824 824
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 68dda74a50ae..cc0ffa9abd00 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -722,20 +722,20 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
722} 722}
723 723
724struct drm_ioctl_desc via_ioctls[] = { 724struct drm_ioctl_desc via_ioctls[] = {
725 DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), 725 DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
726 DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH), 726 DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
727 DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER), 727 DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
728 DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER), 728 DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
729 DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER), 729 DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
730 DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH), 730 DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
731 DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH), 731 DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
732 DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH), 732 DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
733 DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH), 733 DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
734 DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH), 734 DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
735 DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH), 735 DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
736 DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH), 736 DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
737 DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH), 737 DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
738 DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH) 738 DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
739}; 739};
740 740
741int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls); 741int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9dd395b90216..a96ed6d9d010 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -99,47 +99,47 @@
99 */ 99 */
100 100
101#define VMW_IOCTL_DEF(ioctl, func, flags) \ 101#define VMW_IOCTL_DEF(ioctl, func, flags) \
102 [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func} 102 [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
103 103
104/** 104/**
105 * Ioctl definitions. 105 * Ioctl definitions.
106 */ 106 */
107 107
108static struct drm_ioctl_desc vmw_ioctls[] = { 108static struct drm_ioctl_desc vmw_ioctls[] = {
109 VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, 109 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
110 DRM_AUTH | DRM_UNLOCKED), 110 DRM_AUTH | DRM_UNLOCKED),
111 VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, 111 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
112 DRM_AUTH | DRM_UNLOCKED), 112 DRM_AUTH | DRM_UNLOCKED),
113 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, 113 VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
114 DRM_AUTH | DRM_UNLOCKED), 114 DRM_AUTH | DRM_UNLOCKED),
115 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS, 115 VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
116 vmw_kms_cursor_bypass_ioctl, 116 vmw_kms_cursor_bypass_ioctl,
117 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), 117 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
118 118
119 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl, 119 VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
120 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), 120 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
121 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, 121 VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
122 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), 122 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
123 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl, 123 VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
124 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), 124 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
125 125
126 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl, 126 VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
127 DRM_AUTH | DRM_UNLOCKED), 127 DRM_AUTH | DRM_UNLOCKED),
128 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, 128 VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
129 DRM_AUTH | DRM_UNLOCKED), 129 DRM_AUTH | DRM_UNLOCKED),
130 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl, 130 VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
131 DRM_AUTH | DRM_UNLOCKED), 131 DRM_AUTH | DRM_UNLOCKED),
132 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, 132 VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
133 DRM_AUTH | DRM_UNLOCKED), 133 DRM_AUTH | DRM_UNLOCKED),
134 VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl, 134 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
135 DRM_AUTH | DRM_UNLOCKED), 135 DRM_AUTH | DRM_UNLOCKED),
136 VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl, 136 VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
137 DRM_AUTH | DRM_UNLOCKED), 137 DRM_AUTH | DRM_UNLOCKED),
138 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, 138 VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
139 DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), 139 DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
140 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, 140 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
141 DRM_AUTH | DRM_UNLOCKED), 141 DRM_AUTH | DRM_UNLOCKED),
142 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, 142 VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
143 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) 143 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
144}; 144};
145 145
@@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = {
148 {0, 0, 0} 148 {0, 0, 0}
149}; 149};
150 150
151static char *vmw_devname = "vmwgfx"; 151static int enable_fbdev;
152 152
153static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 153static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
154static void vmw_master_init(struct vmw_master *); 154static void vmw_master_init(struct vmw_master *);
155static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 155static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
156 void *ptr); 156 void *ptr);
157 157
158MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
159module_param_named(enable_fbdev, enable_fbdev, int, 0600);
160
158static void vmw_print_capabilities(uint32_t capabilities) 161static void vmw_print_capabilities(uint32_t capabilities)
159{ 162{
160 DRM_INFO("Capabilities:\n"); 163 DRM_INFO("Capabilities:\n");
@@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv)
192{ 195{
193 int ret; 196 int ret;
194 197
195 vmw_kms_save_vga(dev_priv);
196
197 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); 198 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
198 if (unlikely(ret != 0)) { 199 if (unlikely(ret != 0)) {
199 DRM_ERROR("Unable to initialize FIFO.\n"); 200 DRM_ERROR("Unable to initialize FIFO.\n");
@@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv)
206static void vmw_release_device(struct vmw_private *dev_priv) 207static void vmw_release_device(struct vmw_private *dev_priv)
207{ 208{
208 vmw_fifo_release(dev_priv, &dev_priv->fifo); 209 vmw_fifo_release(dev_priv, &dev_priv->fifo);
209 vmw_kms_restore_vga(dev_priv);
210} 210}
211 211
212int vmw_3d_resource_inc(struct vmw_private *dev_priv)
213{
214 int ret = 0;
215
216 mutex_lock(&dev_priv->release_mutex);
217 if (unlikely(dev_priv->num_3d_resources++ == 0)) {
218 ret = vmw_request_device(dev_priv);
219 if (unlikely(ret != 0))
220 --dev_priv->num_3d_resources;
221 }
222 mutex_unlock(&dev_priv->release_mutex);
223 return ret;
224}
225
226
227void vmw_3d_resource_dec(struct vmw_private *dev_priv)
228{
229 int32_t n3d;
230
231 mutex_lock(&dev_priv->release_mutex);
232 if (unlikely(--dev_priv->num_3d_resources == 0))
233 vmw_release_device(dev_priv);
234 n3d = (int32_t) dev_priv->num_3d_resources;
235 mutex_unlock(&dev_priv->release_mutex);
236
237 BUG_ON(n3d < 0);
238}
212 239
213static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 240static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
214{ 241{
@@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
228 dev_priv->last_read_sequence = (uint32_t) -100; 255 dev_priv->last_read_sequence = (uint32_t) -100;
229 mutex_init(&dev_priv->hw_mutex); 256 mutex_init(&dev_priv->hw_mutex);
230 mutex_init(&dev_priv->cmdbuf_mutex); 257 mutex_init(&dev_priv->cmdbuf_mutex);
258 mutex_init(&dev_priv->release_mutex);
231 rwlock_init(&dev_priv->resource_lock); 259 rwlock_init(&dev_priv->resource_lock);
232 idr_init(&dev_priv->context_idr); 260 idr_init(&dev_priv->context_idr);
233 idr_init(&dev_priv->surface_idr); 261 idr_init(&dev_priv->surface_idr);
@@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
244 dev_priv->vram_start = pci_resource_start(dev->pdev, 1); 272 dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
245 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); 273 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
246 274
275 dev_priv->enable_fb = enable_fbdev;
276
247 mutex_lock(&dev_priv->hw_mutex); 277 mutex_lock(&dev_priv->hw_mutex);
248 278
249 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 279 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
343 373
344 dev->dev_private = dev_priv; 374 dev->dev_private = dev_priv;
345 375
346 if (!dev->devname)
347 dev->devname = vmw_devname;
348
349 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
350 ret = drm_irq_install(dev);
351 if (unlikely(ret != 0)) {
352 DRM_ERROR("Failed installing irq: %d\n", ret);
353 goto out_no_irq;
354 }
355 }
356
357 ret = pci_request_regions(dev->pdev, "vmwgfx probe"); 376 ret = pci_request_regions(dev->pdev, "vmwgfx probe");
358 dev_priv->stealth = (ret != 0); 377 dev_priv->stealth = (ret != 0);
359 if (dev_priv->stealth) { 378 if (dev_priv->stealth) {
@@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
369 goto out_no_device; 388 goto out_no_device;
370 } 389 }
371 } 390 }
372 ret = vmw_request_device(dev_priv); 391 ret = vmw_kms_init(dev_priv);
373 if (unlikely(ret != 0)) 392 if (unlikely(ret != 0))
374 goto out_no_device; 393 goto out_no_kms;
375 vmw_kms_init(dev_priv);
376 vmw_overlay_init(dev_priv); 394 vmw_overlay_init(dev_priv);
377 vmw_fb_init(dev_priv); 395 if (dev_priv->enable_fb) {
396 ret = vmw_3d_resource_inc(dev_priv);
397 if (unlikely(ret != 0))
398 goto out_no_fifo;
399 vmw_kms_save_vga(dev_priv);
400 vmw_fb_init(dev_priv);
401 DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
402 "Detected device 3D availability.\n" :
403 "Detected no device 3D availability.\n");
404 } else {
405 DRM_INFO("Delayed 3D detection since we're not "
406 "running the device in SVGA mode yet.\n");
407 }
408
409 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
410 ret = drm_irq_install(dev);
411 if (unlikely(ret != 0)) {
412 DRM_ERROR("Failed installing irq: %d\n", ret);
413 goto out_no_irq;
414 }
415 }
378 416
379 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 417 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
380 register_pm_notifier(&dev_priv->pm_nb); 418 register_pm_notifier(&dev_priv->pm_nb);
381 419
382 DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
383
384 return 0; 420 return 0;
385 421
386out_no_device:
387 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
388 drm_irq_uninstall(dev_priv->dev);
389 if (dev->devname == vmw_devname)
390 dev->devname = NULL;
391out_no_irq: 422out_no_irq:
423 if (dev_priv->enable_fb) {
424 vmw_fb_close(dev_priv);
425 vmw_kms_restore_vga(dev_priv);
426 vmw_3d_resource_dec(dev_priv);
427 }
428out_no_fifo:
429 vmw_overlay_close(dev_priv);
430 vmw_kms_close(dev_priv);
431out_no_kms:
432 if (dev_priv->stealth)
433 pci_release_region(dev->pdev, 2);
434 else
435 pci_release_regions(dev->pdev);
436out_no_device:
392 ttm_object_device_release(&dev_priv->tdev); 437 ttm_object_device_release(&dev_priv->tdev);
393out_err4: 438out_err4:
394 iounmap(dev_priv->mmio_virt); 439 iounmap(dev_priv->mmio_virt);
@@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev)
415 460
416 unregister_pm_notifier(&dev_priv->pm_nb); 461 unregister_pm_notifier(&dev_priv->pm_nb);
417 462
418 vmw_fb_close(dev_priv); 463 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
464 drm_irq_uninstall(dev_priv->dev);
465 if (dev_priv->enable_fb) {
466 vmw_fb_close(dev_priv);
467 vmw_kms_restore_vga(dev_priv);
468 vmw_3d_resource_dec(dev_priv);
469 }
419 vmw_kms_close(dev_priv); 470 vmw_kms_close(dev_priv);
420 vmw_overlay_close(dev_priv); 471 vmw_overlay_close(dev_priv);
421 vmw_release_device(dev_priv);
422 if (dev_priv->stealth) 472 if (dev_priv->stealth)
423 pci_release_region(dev->pdev, 2); 473 pci_release_region(dev->pdev, 2);
424 else 474 else
425 pci_release_regions(dev->pdev); 475 pci_release_regions(dev->pdev);
426 476
427 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
428 drm_irq_uninstall(dev_priv->dev);
429 if (dev->devname == vmw_devname)
430 dev->devname = NULL;
431 ttm_object_device_release(&dev_priv->tdev); 477 ttm_object_device_release(&dev_priv->tdev);
432 iounmap(dev_priv->mmio_virt); 478 iounmap(dev_priv->mmio_virt);
433 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, 479 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
@@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
500 struct drm_ioctl_desc *ioctl = 546 struct drm_ioctl_desc *ioctl =
501 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 547 &vmw_ioctls[nr - DRM_COMMAND_BASE];
502 548
503 if (unlikely(ioctl->cmd != cmd)) { 549 if (unlikely(ioctl->cmd_drv != cmd)) {
504 DRM_ERROR("Invalid command format, ioctl %d\n", 550 DRM_ERROR("Invalid command format, ioctl %d\n",
505 nr - DRM_COMMAND_BASE); 551 nr - DRM_COMMAND_BASE);
506 return -EINVAL; 552 return -EINVAL;
@@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev,
589 struct vmw_master *vmaster = vmw_master(file_priv->master); 635 struct vmw_master *vmaster = vmw_master(file_priv->master);
590 int ret = 0; 636 int ret = 0;
591 637
638 if (!dev_priv->enable_fb) {
639 ret = vmw_3d_resource_inc(dev_priv);
640 if (unlikely(ret != 0))
641 return ret;
642 vmw_kms_save_vga(dev_priv);
643 mutex_lock(&dev_priv->hw_mutex);
644 vmw_write(dev_priv, SVGA_REG_TRACES, 0);
645 mutex_unlock(&dev_priv->hw_mutex);
646 }
647
592 if (active) { 648 if (active) {
593 BUG_ON(active != &dev_priv->fbdev_master); 649 BUG_ON(active != &dev_priv->fbdev_master);
594 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); 650 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev,
617 return 0; 673 return 0;
618 674
619out_no_active_lock: 675out_no_active_lock:
620 vmw_release_device(dev_priv); 676 if (!dev_priv->enable_fb) {
677 mutex_lock(&dev_priv->hw_mutex);
678 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
679 mutex_unlock(&dev_priv->hw_mutex);
680 vmw_kms_restore_vga(dev_priv);
681 vmw_3d_resource_dec(dev_priv);
682 }
621 return ret; 683 return ret;
622} 684}
623 685
@@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev,
645 707
646 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 708 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
647 709
710 if (!dev_priv->enable_fb) {
711 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
712 if (unlikely(ret != 0))
713 DRM_ERROR("Unable to clean VRAM on master drop.\n");
714 mutex_lock(&dev_priv->hw_mutex);
715 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
716 mutex_unlock(&dev_priv->hw_mutex);
717 vmw_kms_restore_vga(dev_priv);
718 vmw_3d_resource_dec(dev_priv);
719 }
720
648 dev_priv->active_master = &dev_priv->fbdev_master; 721 dev_priv->active_master = &dev_priv->fbdev_master;
649 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 722 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
650 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 723 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
651 724
652 vmw_fb_on(dev_priv); 725 if (dev_priv->enable_fb)
726 vmw_fb_on(dev_priv);
653} 727}
654 728
655 729
@@ -722,6 +796,7 @@ static struct drm_driver driver = {
722 .irq_postinstall = vmw_irq_postinstall, 796 .irq_postinstall = vmw_irq_postinstall,
723 .irq_uninstall = vmw_irq_uninstall, 797 .irq_uninstall = vmw_irq_uninstall,
724 .irq_handler = vmw_irq_handler, 798 .irq_handler = vmw_irq_handler,
799 .get_vblank_counter = vmw_get_vblank_counter,
725 .reclaim_buffers_locked = NULL, 800 .reclaim_buffers_locked = NULL,
726 .get_map_ofs = drm_core_get_map_ofs, 801 .get_map_ofs = drm_core_get_map_ofs,
727 .get_reg_ofs = drm_core_get_reg_ofs, 802 .get_reg_ofs = drm_core_get_reg_ofs,
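
vmw_3d_resource_inc()/dec() introduce a counter protected by the new release_mutex, so the FIFO (and with it SVGA 3D) is only brought up while at least one user (fbdev, an active master, a context or a surface) needs it: the first increment requests the device, the last decrement releases it. A compilable user-space model of that first-up/last-down pattern, with a pthread mutex standing in for the kernel mutex, is:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t release_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int num_3d_resources;

static int request_device(void)  { puts("fifo up");   return 0; }
static void release_device(void) { puts("fifo down"); }

static int resource_inc(void)
{
	int ret = 0;

	pthread_mutex_lock(&release_mutex);
	if (num_3d_resources++ == 0) {
		ret = request_device();
		if (ret != 0)
			--num_3d_resources;   /* undo the count on failure */
	}
	pthread_mutex_unlock(&release_mutex);
	return ret;
}

static void resource_dec(void)
{
	pthread_mutex_lock(&release_mutex);
	if (--num_3d_resources == 0)
		release_device();
	pthread_mutex_unlock(&release_mutex);
}

int main(void)
{
	resource_inc();   /* first user powers the FIFO up */
	resource_inc();
	resource_dec();
	resource_dec();   /* last user powers it back down */
	return 0;
}
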
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 429f917b60bf..58de6393f611 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -277,6 +277,7 @@ struct vmw_private {
277 277
278 bool stealth; 278 bool stealth;
279 bool is_opened; 279 bool is_opened;
280 bool enable_fb;
280 281
281 /** 282 /**
282 * Master management. 283 * Master management.
@@ -285,6 +286,9 @@ struct vmw_private {
285 struct vmw_master *active_master; 286 struct vmw_master *active_master;
286 struct vmw_master fbdev_master; 287 struct vmw_master fbdev_master;
287 struct notifier_block pm_nb; 288 struct notifier_block pm_nb;
289
290 struct mutex release_mutex;
291 uint32_t num_3d_resources;
288}; 292};
289 293
290static inline struct vmw_private *vmw_priv(struct drm_device *dev) 294static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
319 return val; 323 return val;
320} 324}
321 325
326int vmw_3d_resource_inc(struct vmw_private *dev_priv);
327void vmw_3d_resource_dec(struct vmw_private *dev_priv);
328
322/** 329/**
323 * GMR utilities - vmwgfx_gmr.c 330 * GMR utilities - vmwgfx_gmr.c
324 */ 331 */
@@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
511 unsigned bbp, unsigned depth); 518 unsigned bbp, unsigned depth);
512int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, 519int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
513 struct drm_file *file_priv); 520 struct drm_file *file_priv);
521u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
514 522
515/** 523/**
516 * Overlay control - vmwgfx_overlay.c 524 * Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 870967a97c15..409e172f4abf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
615 if (unlikely(ret != 0)) 615 if (unlikely(ret != 0))
616 goto err_unlock; 616 goto err_unlock;
617 617
618 if (bo->mem.mem_type == TTM_PL_VRAM &&
619 bo->mem.mm_node->start < bo->num_pages)
620 (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
621 false, false);
622
618 ret = ttm_bo_validate(bo, &ne_placement, false, false, false); 623 ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
619 624
620 /* Could probably bug on */ 625 /* Could probably bug on */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index e6a1eb7ea954..0fe31766e4cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
106 mutex_lock(&dev_priv->hw_mutex); 106 mutex_lock(&dev_priv->hw_mutex);
107 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 107 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
108 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 108 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
109 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
109 vmw_write(dev_priv, SVGA_REG_ENABLE, 1); 110 vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
110 111
111 min = 4; 112 min = 4;
@@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
175 dev_priv->config_done_state); 176 dev_priv->config_done_state);
176 vmw_write(dev_priv, SVGA_REG_ENABLE, 177 vmw_write(dev_priv, SVGA_REG_ENABLE,
177 dev_priv->enable_state); 178 dev_priv->enable_state);
179 vmw_write(dev_priv, SVGA_REG_TRACES,
180 dev_priv->traces_state);
178 181
179 mutex_unlock(&dev_priv->hw_mutex); 182 mutex_unlock(&dev_priv->hw_mutex);
180 vmw_fence_queue_takedown(&fifo->fence_queue); 183 vmw_fence_queue_takedown(&fifo->fence_queue);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 64d7f47df868..e882ba099f0c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
898 save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); 898 save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
899 save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); 899 save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
900 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); 900 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
901 if (i == 0 && vmw_priv->num_displays == 1 &&
902 save->width == 0 && save->height == 0) {
903
904 /*
905 * It should be fairly safe to assume that these
906 * values are uninitialized.
907 */
908
909 save->width = vmw_priv->vga_width - save->pos_x;
910 save->height = vmw_priv->vga_height - save->pos_y;
911 }
901 } 912 }
913
902 return 0; 914 return 0;
903} 915}
904 916
@@ -984,3 +996,8 @@ out_unlock:
984 ttm_read_unlock(&vmaster->lock); 996 ttm_read_unlock(&vmaster->lock);
985 return ret; 997 return ret;
986} 998}
999
1000u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
1001{
1002 return 0;
1003}
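
Two small KMS pieces here: vmw_kms_save_vga() now treats a lone display that reports 0x0 as uninitialized and backfills its size from the VGA dimensions minus the display offset, and vmw_get_vblank_counter() is added as a trivial callback for the vblank support wired up in the ldu code. The backfill arithmetic, isolated into a runnable sketch with hypothetical numbers, is:

#include <stdio.h>

struct display_save { int pos_x, pos_y, width, height; };

/* If the only display still reports 0x0, derive its size from the VGA mode. */
static void backfill(struct display_save *d, int vga_width, int vga_height)
{
	if (d->width == 0 && d->height == 0) {
		d->width  = vga_width  - d->pos_x;
		d->height = vga_height - d->pos_y;
	}
}

int main(void)
{
	struct display_save d = { .pos_x = 0, .pos_y = 0 };   /* hypothetical values */

	backfill(&d, 1024, 768);
	printf("%dx%d\n", d.width, d.height);
	return 0;
}
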
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 2ff5cf78235f..11cb39e3accb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -27,6 +27,8 @@
27 27
28#include "vmwgfx_kms.h" 28#include "vmwgfx_kms.h"
29 29
30#define VMWGFX_LDU_NUM_DU 8
31
30#define vmw_crtc_to_ldu(x) \ 32#define vmw_crtc_to_ldu(x) \
31 container_of(x, struct vmw_legacy_display_unit, base.crtc) 33 container_of(x, struct vmw_legacy_display_unit, base.crtc)
32#define vmw_encoder_to_ldu(x) \ 34#define vmw_encoder_to_ldu(x) \
@@ -335,7 +337,8 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
335} 337}
336 338
337static enum drm_connector_status 339static enum drm_connector_status
338 vmw_ldu_connector_detect(struct drm_connector *connector) 340 vmw_ldu_connector_detect(struct drm_connector *connector,
341 bool force)
339{ 342{
340 if (vmw_connector_to_ldu(connector)->pref_active) 343 if (vmw_connector_to_ldu(connector)->pref_active)
341 return connector_status_connected; 344 return connector_status_connected;
@@ -516,7 +519,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
516 519
517 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, 520 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
518 DRM_MODE_CONNECTOR_LVDS); 521 DRM_MODE_CONNECTOR_LVDS);
519 connector->status = vmw_ldu_connector_detect(connector); 522 connector->status = vmw_ldu_connector_detect(connector, true);
520 523
521 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, 524 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
522 DRM_MODE_ENCODER_LVDS); 525 DRM_MODE_ENCODER_LVDS);
@@ -535,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
535 538
536int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) 539int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
537{ 540{
541 struct drm_device *dev = dev_priv->dev;
542 int i;
543 int ret;
544
538 if (dev_priv->ldu_priv) { 545 if (dev_priv->ldu_priv) {
539 DRM_INFO("ldu system already on\n"); 546 DRM_INFO("ldu system already on\n");
540 return -EINVAL; 547 return -EINVAL;
@@ -552,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
552 559
553 drm_mode_create_dirty_info_property(dev_priv->dev); 560 drm_mode_create_dirty_info_property(dev_priv->dev);
554 561
555 vmw_ldu_init(dev_priv, 0);
556 /* for old hardware without multimon only enable one display */
557 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { 562 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
558 vmw_ldu_init(dev_priv, 1); 563 for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
559 vmw_ldu_init(dev_priv, 2); 564 vmw_ldu_init(dev_priv, i);
560 vmw_ldu_init(dev_priv, 3); 565 ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
561 vmw_ldu_init(dev_priv, 4); 566 } else {
562 vmw_ldu_init(dev_priv, 5); 567 /* for old hardware without multimon only enable one display */
563 vmw_ldu_init(dev_priv, 6); 568 vmw_ldu_init(dev_priv, 0);
564 vmw_ldu_init(dev_priv, 7); 569 ret = drm_vblank_init(dev, 1);
565 } 570 }
566 571
567 return 0; 572 return ret;
568} 573}
569 574
570int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) 575int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
571{ 576{
577 struct drm_device *dev = dev_priv->dev;
578
579 drm_vblank_cleanup(dev);
572 if (!dev_priv->ldu_priv) 580 if (!dev_priv->ldu_priv)
573 return -ENOSYS; 581 return -ENOSYS;
574 582
@@ -610,7 +618,7 @@ int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
610 ldu->pref_height = 600; 618 ldu->pref_height = 600;
611 ldu->pref_active = false; 619 ldu->pref_active = false;
612 } 620 }
613 con->status = vmw_ldu_connector_detect(con); 621 con->status = vmw_ldu_connector_detect(con, true);
614 } 622 }
615 623
616 mutex_unlock(&dev->mode_config.mutex); 624 mutex_unlock(&dev->mode_config.mutex);
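
vmwgfx_ldu.c replaces the hand-unrolled vmw_ldu_init(dev_priv, 1..7) calls with a loop over VMWGFX_LDU_NUM_DU and sizes drm_vblank_init() to match (eight units on multimon hardware, one otherwise), propagating its return value instead of always returning 0. The control flow, boiled down to a standalone sketch, is:

#include <stdbool.h>
#include <stdio.h>

#define NUM_DU 8

static void unit_init(int i)       { printf("init unit %d\n", i); }
static int  vblank_init(int count) { printf("vblank for %d crtcs\n", count); return 0; }

static int init_display_system(bool multimon)
{
	int i;

	if (multimon) {
		for (i = 0; i < NUM_DU; ++i)
			unit_init(i);
		return vblank_init(NUM_DU);
	}
	/* old hardware without multimon: a single display unit */
	unit_init(0);
	return vblank_init(1);
}

int main(void) { return init_display_system(true); }
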
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 5f2d5df01e5c..c8c40e9979db 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
211 cmd->body.cid = cpu_to_le32(res->id); 211 cmd->body.cid = cpu_to_le32(res->id);
212 212
213 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 213 vmw_fifo_commit(dev_priv, sizeof(*cmd));
214 vmw_3d_resource_dec(dev_priv);
214} 215}
215 216
216static int vmw_context_init(struct vmw_private *dev_priv, 217static int vmw_context_init(struct vmw_private *dev_priv,
@@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
247 cmd->body.cid = cpu_to_le32(res->id); 248 cmd->body.cid = cpu_to_le32(res->id);
248 249
249 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 250 vmw_fifo_commit(dev_priv, sizeof(*cmd));
251 (void) vmw_3d_resource_inc(dev_priv);
250 vmw_resource_activate(res, vmw_hw_context_destroy); 252 vmw_resource_activate(res, vmw_hw_context_destroy);
251 return 0; 253 return 0;
252} 254}
@@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
406 cmd->body.sid = cpu_to_le32(res->id); 408 cmd->body.sid = cpu_to_le32(res->id);
407 409
408 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 410 vmw_fifo_commit(dev_priv, sizeof(*cmd));
411 vmw_3d_resource_dec(dev_priv);
409} 412}
410 413
411void vmw_surface_res_free(struct vmw_resource *res) 414void vmw_surface_res_free(struct vmw_resource *res)
@@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv,
473 } 476 }
474 477
475 vmw_fifo_commit(dev_priv, submit_size); 478 vmw_fifo_commit(dev_priv, submit_size);
479 (void) vmw_3d_resource_inc(dev_priv);
476 vmw_resource_activate(res, vmw_hw_surface_destroy); 480 vmw_resource_activate(res, vmw_hw_surface_destroy);
477 return 0; 481 return 0;
478} 482}
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index b87569e96b16..f366f968155a 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
598 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); 598 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
599} 599}
600 600
601void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) 601static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
602{ 602{
603 struct vga_device *vgadev; 603 struct vga_device *vgadev;
604 unsigned long flags; 604 unsigned long flags;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 0c52899be964..3f7292486024 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1285,8 +1285,11 @@ static const struct hid_device_id hid_blacklist[] = {
1285 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1285 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
1286 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1286 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
1287 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1287 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1288 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
1289 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
1288 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, 1290 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
1289 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, 1291 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
1292 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
1290 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, 1293 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
1291 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, 1294 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
1292 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, 1295 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
@@ -1578,7 +1581,6 @@ static const struct hid_device_id hid_ignore_list[] = {
1578 { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, 1581 { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
1579 { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, 1582 { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
1580 { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, 1583 { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
1581 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)},
1582 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, 1584 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
1583 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, 1585 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
1584 { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, 1586 { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 85c6d13c9ffa..765a4f53eb5c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -105,6 +105,7 @@
105 105
106#define USB_VENDOR_ID_ASUS 0x0486 106#define USB_VENDOR_ID_ASUS 0x0486
107#define USB_DEVICE_ID_ASUS_T91MT 0x0185 107#define USB_DEVICE_ID_ASUS_T91MT 0x0185
108#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186
108 109
109#define USB_VENDOR_ID_ASUSTEK 0x0b05 110#define USB_VENDOR_ID_ASUSTEK 0x0b05
110#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 111#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
@@ -128,6 +129,7 @@
128 129
129#define USB_VENDOR_ID_BTC 0x046e 130#define USB_VENDOR_ID_BTC 0x046e
130#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578 131#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
132#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
131 133
132#define USB_VENDOR_ID_CANDO 0x2087 134#define USB_VENDOR_ID_CANDO 0x2087
133#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 135#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
@@ -149,6 +151,7 @@
149 151
150#define USB_VENDOR_ID_CHICONY 0x04f2 152#define USB_VENDOR_ID_CHICONY 0x04f2
151#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 153#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418
154#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
152 155
153#define USB_VENDOR_ID_CIDC 0x1677 156#define USB_VENDOR_ID_CIDC 0x1677
154 157
@@ -507,6 +510,7 @@
507#define USB_VENDOR_ID_UCLOGIC 0x5543 510#define USB_VENDOR_ID_UCLOGIC 0x5543
508#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 511#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
509#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003 512#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
513#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
510 514
511#define USB_VENDOR_ID_VERNIER 0x08f7 515#define USB_VENDOR_ID_VERNIER 0x08f7
512#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001 516#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
index e91437c18906..ac5421d568f1 100644
--- a/drivers/hid/hid-mosart.c
+++ b/drivers/hid/hid-mosart.c
@@ -239,6 +239,7 @@ static void mosart_remove(struct hid_device *hdev)
239 239
240static const struct hid_device_id mosart_devices[] = { 240static const struct hid_device_id mosart_devices[] = {
241 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) }, 241 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
242 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
242 { } 243 { }
243}; 244};
244MODULE_DEVICE_TABLE(hid, mosart_devices); 245MODULE_DEVICE_TABLE(hid, mosart_devices);
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 5771f851f856..956ed9ac19d4 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -64,6 +64,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
64static const struct hid_device_id ts_devices[] = { 64static const struct hid_device_id ts_devices[] = {
65 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 65 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
66 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, 66 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
67 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
67 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, 68 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
68 { } 69 { }
69}; 70};
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b729c0286679..599041a7f670 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -828,6 +828,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
828 } 828 }
829 } else { 829 } else {
830 int skipped_report_id = 0; 830 int skipped_report_id = 0;
831 int report_id = buf[0];
831 if (buf[0] == 0x0) { 832 if (buf[0] == 0x0) {
832 /* Don't send the Report ID */ 833 /* Don't send the Report ID */
833 buf++; 834 buf++;
@@ -837,7 +838,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
837 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 838 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
838 HID_REQ_SET_REPORT, 839 HID_REQ_SET_REPORT,
839 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 840 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
840 ((report_type + 1) << 8) | *buf, 841 ((report_type + 1) << 8) | report_id,
841 interface->desc.bInterfaceNumber, buf, count, 842 interface->desc.bInterfaceNumber, buf, count,
842 USB_CTRL_SET_TIMEOUT); 843 USB_CTRL_SET_TIMEOUT);
843 /* count also the report id, if this was a numbered report. */ 844 /* count also the report id, if this was a numbered report. */
@@ -1445,6 +1446,11 @@ static const struct hid_device_id hid_usb_table[] = {
1445 { } 1446 { }
1446}; 1447};
1447 1448
1449struct usb_interface *usbhid_find_interface(int minor)
1450{
1451 return usb_find_interface(&hid_driver, minor);
1452}
1453
1448static struct hid_driver hid_usb_driver = { 1454static struct hid_driver hid_usb_driver = {
1449 .name = "generic-usb", 1455 .name = "generic-usb",
1450 .id_table = hid_usb_table, 1456 .id_table = hid_usb_table,
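The usbhid/hid-core.c hunks above cache the report ID before the buffer pointer can be advanced past it, so the SET_REPORT control message still carries the correct ID in wValue even when an unnumbered report (leading 0x00 byte) is stripped; previously *buf was used after that adjustment. A minimal standalone sketch of that ordering, with made-up names and printf standing in for usb_control_msg():

#include <stdio.h>
#include <stddef.h>

static void send_report(const unsigned char *buf, size_t count, int report_type)
{
    int report_id = buf[0];     /* remember the ID before any adjustment */

    if (buf[0] == 0x0) {
        /* unnumbered report: the ID byte itself is not transmitted */
        buf++;
        count--;
    }

    /* wValue uses the cached ID; *buf would already be a data byte
     * in the unnumbered case */
    printf("wValue=0x%04x, payload bytes=%zu\n",
           (unsigned)(((report_type + 1) << 8) | report_id), count);
}

int main(void)
{
    const unsigned char numbered[]   = { 0x02, 0xaa, 0xbb };
    const unsigned char unnumbered[] = { 0x00, 0xcc, 0xdd };

    send_report(numbered, sizeof(numbered), 2);
    send_report(unnumbered, sizeof(unnumbered), 2);
    return 0;
}

The usbhid_find_interface() helper added in the same file simply wraps usb_find_interface() on hid_driver, which is what lets the hiddev.c hunk further down stop referencing hiddev_driver directly.
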
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2643d3147621..70da3181c8a0 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -33,6 +33,7 @@ static const struct hid_blacklist {
33 { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD }, 33 { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
34 { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD }, 34 { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
35 { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD }, 35 { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
36 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
36 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT }, 37 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
37 { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, 38 { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
38 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, 39 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -69,6 +70,7 @@ static const struct hid_blacklist {
69 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, 70 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
70 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, 71 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
71 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT }, 72 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
73 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
72 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, 74 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
73 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 75 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
74 76
@@ -77,6 +79,8 @@ static const struct hid_blacklist {
77 79
78 { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE }, 80 { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
79 81
82 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
83
80 { 0, 0 } 84 { 0, 0 }
81}; 85};
82 86
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 0a29c51114aa..681e620eb95b 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -270,7 +270,7 @@ static int hiddev_open(struct inode *inode, struct file *file)
270 struct hiddev *hiddev; 270 struct hiddev *hiddev;
271 int res; 271 int res;
272 272
273 intf = usb_find_interface(&hiddev_driver, iminor(inode)); 273 intf = usbhid_find_interface(iminor(inode));
274 if (!intf) 274 if (!intf)
275 return -ENODEV; 275 return -ENODEV;
276 hid = usb_get_intfdata(intf); 276 hid = usb_get_intfdata(intf);
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 693fd3e720df..89d2e847dcc6 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -42,6 +42,7 @@ void usbhid_submit_report
42(struct hid_device *hid, struct hid_report *report, unsigned char dir); 42(struct hid_device *hid, struct hid_report *report, unsigned char dir);
43int usbhid_get_power(struct hid_device *hid); 43int usbhid_get_power(struct hid_device *hid);
44void usbhid_put_power(struct hid_device *hid); 44void usbhid_put_power(struct hid_device *hid);
45struct usb_interface *usbhid_find_interface(int minor);
45 46
46/* iofl flags */ 47/* iofl flags */
47#define HID_CTRL_RUNNING 1 48#define HID_CTRL_RUNNING 1
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4d4d09bdec0a..97499d00615a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -409,7 +409,7 @@ config SENSORS_CORETEMP
409 409
410config SENSORS_PKGTEMP 410config SENSORS_PKGTEMP
411 tristate "Intel processor package temperature sensor" 411 tristate "Intel processor package temperature sensor"
412 depends on X86 && PCI && EXPERIMENTAL 412 depends on X86 && EXPERIMENTAL
413 help 413 help
414 If you say yes here you get support for the package level temperature 414 If you say yes here you get support for the package level temperature
415 sensor inside your CPU. Check documentation/driver for details. 415 sensor inside your CPU. Check documentation/driver for details.
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 15c1a9616af3..0683e6be662c 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -79,7 +79,7 @@ struct adm1031_data {
79 int chip_type; 79 int chip_type;
80 char valid; /* !=0 if following fields are valid */ 80 char valid; /* !=0 if following fields are valid */
81 unsigned long last_updated; /* In jiffies */ 81 unsigned long last_updated; /* In jiffies */
82 unsigned int update_rate; /* In milliseconds */ 82 unsigned int update_interval; /* In milliseconds */
83 /* The chan_select_table contains the possible configurations for 83 /* The chan_select_table contains the possible configurations for
84 * auto fan control. 84 * auto fan control.
85 */ 85 */
@@ -743,23 +743,23 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
743static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13); 743static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
744static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14); 744static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
745 745
746/* Update Rate */ 746/* Update Interval */
747static const unsigned int update_rates[] = { 747static const unsigned int update_intervals[] = {
748 16000, 8000, 4000, 2000, 1000, 500, 250, 125, 748 16000, 8000, 4000, 2000, 1000, 500, 250, 125,
749}; 749};
750 750
751static ssize_t show_update_rate(struct device *dev, 751static ssize_t show_update_interval(struct device *dev,
752 struct device_attribute *attr, char *buf) 752 struct device_attribute *attr, char *buf)
753{ 753{
754 struct i2c_client *client = to_i2c_client(dev); 754 struct i2c_client *client = to_i2c_client(dev);
755 struct adm1031_data *data = i2c_get_clientdata(client); 755 struct adm1031_data *data = i2c_get_clientdata(client);
756 756
757 return sprintf(buf, "%u\n", data->update_rate); 757 return sprintf(buf, "%u\n", data->update_interval);
758} 758}
759 759
760static ssize_t set_update_rate(struct device *dev, 760static ssize_t set_update_interval(struct device *dev,
761 struct device_attribute *attr, 761 struct device_attribute *attr,
762 const char *buf, size_t count) 762 const char *buf, size_t count)
763{ 763{
764 struct i2c_client *client = to_i2c_client(dev); 764 struct i2c_client *client = to_i2c_client(dev);
765 struct adm1031_data *data = i2c_get_clientdata(client); 765 struct adm1031_data *data = i2c_get_clientdata(client);
@@ -771,12 +771,15 @@ static ssize_t set_update_rate(struct device *dev,
771 if (err) 771 if (err)
772 return err; 772 return err;
773 773
774 /* find the nearest update rate from the table */ 774 /*
775 for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) { 775 * Find the nearest update interval from the table.
776 if (val >= update_rates[i]) 776 * Use it to determine the matching update rate.
777 */
778 for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++) {
779 if (val >= update_intervals[i])
777 break; 780 break;
778 } 781 }
779 /* if not found, we point to the last entry (lowest update rate) */ 782 /* if not found, we point to the last entry (lowest update interval) */
780 783
781 /* set the new update rate while preserving other settings */ 784 /* set the new update rate while preserving other settings */
782 reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); 785 reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
@@ -785,14 +788,14 @@ static ssize_t set_update_rate(struct device *dev,
785 adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg); 788 adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
786 789
787 mutex_lock(&data->update_lock); 790 mutex_lock(&data->update_lock);
788 data->update_rate = update_rates[i]; 791 data->update_interval = update_intervals[i];
789 mutex_unlock(&data->update_lock); 792 mutex_unlock(&data->update_lock);
790 793
791 return count; 794 return count;
792} 795}
793 796
794static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate, 797static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
795 set_update_rate); 798 set_update_interval);
796 799
797static struct attribute *adm1031_attributes[] = { 800static struct attribute *adm1031_attributes[] = {
798 &sensor_dev_attr_fan1_input.dev_attr.attr, 801 &sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -830,7 +833,7 @@ static struct attribute *adm1031_attributes[] = {
830 833
831 &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr, 834 &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
832 835
833 &dev_attr_update_rate.attr, 836 &dev_attr_update_interval.attr,
834 &dev_attr_alarms.attr, 837 &dev_attr_alarms.attr,
835 838
836 NULL 839 NULL
@@ -981,7 +984,8 @@ static void adm1031_init_client(struct i2c_client *client)
981 mask = ADM1031_UPDATE_RATE_MASK; 984 mask = ADM1031_UPDATE_RATE_MASK;
982 read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); 985 read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
983 i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT; 986 i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
984 data->update_rate = update_rates[i]; 987 /* Save it as update interval */
988 data->update_interval = update_intervals[i];
985} 989}
986 990
987static struct adm1031_data *adm1031_update_device(struct device *dev) 991static struct adm1031_data *adm1031_update_device(struct device *dev)
@@ -993,7 +997,8 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
993 997
994 mutex_lock(&data->update_lock); 998 mutex_lock(&data->update_lock);
995 999
996 next_update = data->last_updated + msecs_to_jiffies(data->update_rate); 1000 next_update = data->last_updated
1001 + msecs_to_jiffies(data->update_interval);
997 if (time_after(jiffies, next_update) || !data->valid) { 1002 if (time_after(jiffies, next_update) || !data->valid) {
998 1003
999 dev_dbg(&client->dev, "Starting adm1031 update\n"); 1004 dev_dbg(&client->dev, "Starting adm1031 update\n");
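The adm1031 hunks rename the update_rate bookkeeping to update_interval and keep the table lookup that snaps a requested value to the nearest supported interval: the table is sorted in descending order and the last entry is the fallback. A self-contained sketch of that lookup, using the same table values:

#include <stdio.h>

static const unsigned int update_intervals[] = {
    16000, 8000, 4000, 2000, 1000, 500, 250, 125,
};
#define N_INTERVALS (sizeof(update_intervals) / sizeof(update_intervals[0]))

static unsigned int pick_interval(unsigned long val)
{
    unsigned int i;

    /* stop at the first (largest) entry that does not exceed val;
     * if none matches, fall through to the last, smallest entry */
    for (i = 0; i < N_INTERVALS - 1; i++)
        if (val >= update_intervals[i])
            break;
    return update_intervals[i];
}

int main(void)
{
    printf("%u\n", pick_interval(3000));    /* prints 2000 */
    printf("%u\n", pick_interval(50));      /* prints 125 */
    return 0;
}
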
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index b300a2048af1..52319340e182 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -160,30 +160,12 @@ static const struct attribute_group ads7871_group = {
160 160
161static int __devinit ads7871_probe(struct spi_device *spi) 161static int __devinit ads7871_probe(struct spi_device *spi)
162{ 162{
163 int status, ret, err = 0; 163 int ret, err;
164 uint8_t val; 164 uint8_t val;
165 struct ads7871_data *pdata; 165 struct ads7871_data *pdata;
166 166
167 dev_dbg(&spi->dev, "probe\n"); 167 dev_dbg(&spi->dev, "probe\n");
168 168
169 pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
170 if (!pdata) {
171 err = -ENOMEM;
172 goto exit;
173 }
174
175 status = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
176 if (status < 0)
177 goto error_free;
178
179 pdata->hwmon_dev = hwmon_device_register(&spi->dev);
180 if (IS_ERR(pdata->hwmon_dev)) {
181 err = PTR_ERR(pdata->hwmon_dev);
182 goto error_remove;
183 }
184
185 spi_set_drvdata(spi, pdata);
186
187 /* Configure the SPI bus */ 169 /* Configure the SPI bus */
188 spi->mode = (SPI_MODE_0); 170 spi->mode = (SPI_MODE_0);
189 spi->bits_per_word = 8; 171 spi->bits_per_word = 8;
@@ -201,6 +183,24 @@ static int __devinit ads7871_probe(struct spi_device *spi)
201 we need to make sure we really have a chip*/ 183 we need to make sure we really have a chip*/
202 if (val != ret) { 184 if (val != ret) {
203 err = -ENODEV; 185 err = -ENODEV;
186 goto exit;
187 }
188
189 pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
190 if (!pdata) {
191 err = -ENOMEM;
192 goto exit;
193 }
194
195 err = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
196 if (err < 0)
197 goto error_free;
198
199 spi_set_drvdata(spi, pdata);
200
201 pdata->hwmon_dev = hwmon_device_register(&spi->dev);
202 if (IS_ERR(pdata->hwmon_dev)) {
203 err = PTR_ERR(pdata->hwmon_dev);
204 goto error_remove; 204 goto error_remove;
205 } 205 }
206 206
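The ads7871 probe is reordered so the chip is verified first, and memory allocation plus sysfs/hwmon registration only happen once the hardware has answered correctly; the error paths then only need to undo what was actually done. A rough standalone sketch of that shape (all helpers here are placeholders, not the driver's functions):

#include <stdio.h>
#include <stdlib.h>

static int hardware_responds(void) { return 1; }               /* detection stand-in */
static int publish_interfaces(void *state) { (void)state; return 0; }

static int probe_example(void)
{
    void *state;
    int err;

    if (!hardware_responds())   /* cheap check first, nothing to undo */
        return -1;

    state = malloc(64);
    if (!state)
        return -1;

    err = publish_interfaces(state);    /* sysfs/hwmon analogue */
    if (err) {
        free(state);
        return err;
    }
    return 0;
}

int main(void)
{
    printf("probe: %d\n", probe_example());
    return 0;
}
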
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index c070c9714cbe..a23b17a78ace 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,6 +36,7 @@
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <asm/msr.h> 37#include <asm/msr.h>
38#include <asm/processor.h> 38#include <asm/processor.h>
39#include <asm/smp.h>
39 40
40#define DRVNAME "coretemp" 41#define DRVNAME "coretemp"
41 42
@@ -423,9 +424,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
423 int err; 424 int err;
424 struct platform_device *pdev; 425 struct platform_device *pdev;
425 struct pdev_entry *pdev_entry; 426 struct pdev_entry *pdev_entry;
426#ifdef CONFIG_SMP
427 struct cpuinfo_x86 *c = &cpu_data(cpu); 427 struct cpuinfo_x86 *c = &cpu_data(cpu);
428#endif 428
429 /*
430 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
431 * sensors. We check this bit only, all the early CPUs
432 * without thermal sensors will be filtered out.
433 */
434 if (!cpu_has(c, X86_FEATURE_DTS)) {
435 printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
436 " has no thermal sensor.\n", c->x86_model);
437 return 0;
438 }
429 439
430 mutex_lock(&pdev_list_mutex); 440 mutex_lock(&pdev_list_mutex);
431 441
@@ -482,14 +492,22 @@ exit:
482 492
483static void coretemp_device_remove(unsigned int cpu) 493static void coretemp_device_remove(unsigned int cpu)
484{ 494{
485 struct pdev_entry *p, *n; 495 struct pdev_entry *p;
496 unsigned int i;
497
486 mutex_lock(&pdev_list_mutex); 498 mutex_lock(&pdev_list_mutex);
487 list_for_each_entry_safe(p, n, &pdev_list, list) { 499 list_for_each_entry(p, &pdev_list, list) {
488 if (p->cpu == cpu) { 500 if (p->cpu != cpu)
489 platform_device_unregister(p->pdev); 501 continue;
490 list_del(&p->list); 502
491 kfree(p); 503 platform_device_unregister(p->pdev);
492 } 504 list_del(&p->list);
505 mutex_unlock(&pdev_list_mutex);
506 kfree(p);
507 for_each_cpu(i, cpu_sibling_mask(cpu))
508 if (i != cpu && !coretemp_device_add(i))
509 break;
510 return;
493 } 511 }
494 mutex_unlock(&pdev_list_mutex); 512 mutex_unlock(&pdev_list_mutex);
495} 513}
@@ -518,7 +536,6 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
518static int __init coretemp_init(void) 536static int __init coretemp_init(void)
519{ 537{
520 int i, err = -ENODEV; 538 int i, err = -ENODEV;
521 struct pdev_entry *p, *n;
522 539
523 /* quick check if we run Intel */ 540 /* quick check if we run Intel */
524 if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL) 541 if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
@@ -528,30 +545,21 @@ static int __init coretemp_init(void)
528 if (err) 545 if (err)
529 goto exit; 546 goto exit;
530 547
531 for_each_online_cpu(i) { 548 for_each_online_cpu(i)
532 struct cpuinfo_x86 *c = &cpu_data(i); 549 coretemp_device_add(i);
533 /* 550
534 * CPUID.06H.EAX[0] indicates whether the CPU has thermal 551#ifndef CONFIG_HOTPLUG_CPU
535 * sensors. We check this bit only, all the early CPUs
536 * without thermal sensors will be filtered out.
537 */
538 if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01))
539 coretemp_device_add(i);
540 else {
541 printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
542 " has no thermal sensor.\n", c->x86_model);
543 }
544 }
545 if (list_empty(&pdev_list)) { 552 if (list_empty(&pdev_list)) {
546 err = -ENODEV; 553 err = -ENODEV;
547 goto exit_driver_unreg; 554 goto exit_driver_unreg;
548 } 555 }
556#endif
549 557
550 register_hotcpu_notifier(&coretemp_cpu_notifier); 558 register_hotcpu_notifier(&coretemp_cpu_notifier);
551 return 0; 559 return 0;
552 560
553exit_driver_unreg:
554#ifndef CONFIG_HOTPLUG_CPU 561#ifndef CONFIG_HOTPLUG_CPU
562exit_driver_unreg:
555 platform_driver_unregister(&coretemp_driver); 563 platform_driver_unregister(&coretemp_driver);
556#endif 564#endif
557exit: 565exit:
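coretemp now performs the thermal-sensor check (CPUID leaf 06H, EAX bit 0, i.e. X86_FEATURE_DTS) inside coretemp_device_add(), and the hotplug removal path unlinks the entry, drops the list lock, and then hands the device to a sibling CPU. For the feature test itself, a userspace x86 sketch using the gcc/clang cpuid intrinsic shows what the bit means; this is only an illustration, not the driver code:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

    if (!__get_cpuid(6, &eax, &ebx, &ecx, &edx)) {
        printf("CPUID leaf 6 not supported\n");
        return 1;
    }
    printf("digital thermal sensor: %s\n", (eax & 1) ? "present" : "absent");
    return 0;
}
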
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 5b58b20dead1..8dee3f38fdfb 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -308,7 +308,6 @@ static int emc1403_probe(struct i2c_client *client,
308 res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr); 308 res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
309 if (res) { 309 if (res) {
310 dev_warn(&client->dev, "create group failed\n"); 310 dev_warn(&client->dev, "create group failed\n");
311 hwmon_device_unregister(data->hwmon_dev);
312 goto thermal_error1; 311 goto thermal_error1;
313 } 312 }
314 data->hwmon_dev = hwmon_device_register(&client->dev); 313 data->hwmon_dev = hwmon_device_register(&client->dev);
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 537841ef44b9..75afb3b0e076 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -111,7 +111,7 @@ static struct platform_device *f71882fg_pdev;
111/* Super-I/O Function prototypes */ 111/* Super-I/O Function prototypes */
112static inline int superio_inb(int base, int reg); 112static inline int superio_inb(int base, int reg);
113static inline int superio_inw(int base, int reg); 113static inline int superio_inw(int base, int reg);
114static inline void superio_enter(int base); 114static inline int superio_enter(int base);
115static inline void superio_select(int base, int ld); 115static inline void superio_select(int base, int ld);
116static inline void superio_exit(int base); 116static inline void superio_exit(int base);
117 117
@@ -861,11 +861,20 @@ static int superio_inw(int base, int reg)
861 return val; 861 return val;
862} 862}
863 863
864static inline void superio_enter(int base) 864static inline int superio_enter(int base)
865{ 865{
866 /* Don't step on other drivers' I/O space by accident */
867 if (!request_muxed_region(base, 2, DRVNAME)) {
868 printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
869 base);
870 return -EBUSY;
871 }
872
866 /* according to the datasheet the key must be send twice! */ 873 /* according to the datasheet the key must be send twice! */
867 outb(SIO_UNLOCK_KEY, base); 874 outb(SIO_UNLOCK_KEY, base);
868 outb(SIO_UNLOCK_KEY, base); 875 outb(SIO_UNLOCK_KEY, base);
876
877 return 0;
869} 878}
870 879
871static inline void superio_select(int base, int ld) 880static inline void superio_select(int base, int ld)
@@ -877,6 +886,7 @@ static inline void superio_select(int base, int ld)
877static inline void superio_exit(int base) 886static inline void superio_exit(int base)
878{ 887{
879 outb(SIO_LOCK_KEY, base); 888 outb(SIO_LOCK_KEY, base);
889 release_region(base, 2);
880} 890}
881 891
882static inline int fan_from_reg(u16 reg) 892static inline int fan_from_reg(u16 reg)
@@ -2175,21 +2185,15 @@ static int f71882fg_remove(struct platform_device *pdev)
2175static int __init f71882fg_find(int sioaddr, unsigned short *address, 2185static int __init f71882fg_find(int sioaddr, unsigned short *address,
2176 struct f71882fg_sio_data *sio_data) 2186 struct f71882fg_sio_data *sio_data)
2177{ 2187{
2178 int err = -ENODEV;
2179 u16 devid; 2188 u16 devid;
2180 2189 int err = superio_enter(sioaddr);
2181 /* Don't step on other drivers' I/O space by accident */ 2190 if (err)
2182 if (!request_region(sioaddr, 2, DRVNAME)) { 2191 return err;
2183 printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
2184 (int)sioaddr);
2185 return -EBUSY;
2186 }
2187
2188 superio_enter(sioaddr);
2189 2192
2190 devid = superio_inw(sioaddr, SIO_REG_MANID); 2193 devid = superio_inw(sioaddr, SIO_REG_MANID);
2191 if (devid != SIO_FINTEK_ID) { 2194 if (devid != SIO_FINTEK_ID) {
2192 pr_debug(DRVNAME ": Not a Fintek device\n"); 2195 pr_debug(DRVNAME ": Not a Fintek device\n");
2196 err = -ENODEV;
2193 goto exit; 2197 goto exit;
2194 } 2198 }
2195 2199
@@ -2213,6 +2217,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2213 default: 2217 default:
2214 printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n", 2218 printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n",
2215 (unsigned int)devid); 2219 (unsigned int)devid);
2220 err = -ENODEV;
2216 goto exit; 2221 goto exit;
2217 } 2222 }
2218 2223
@@ -2223,12 +2228,14 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2223 2228
2224 if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { 2229 if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
2225 printk(KERN_WARNING DRVNAME ": Device not activated\n"); 2230 printk(KERN_WARNING DRVNAME ": Device not activated\n");
2231 err = -ENODEV;
2226 goto exit; 2232 goto exit;
2227 } 2233 }
2228 2234
2229 *address = superio_inw(sioaddr, SIO_REG_ADDR); 2235 *address = superio_inw(sioaddr, SIO_REG_ADDR);
2230 if (*address == 0) { 2236 if (*address == 0) {
2231 printk(KERN_WARNING DRVNAME ": Base address not set\n"); 2237 printk(KERN_WARNING DRVNAME ": Base address not set\n");
2238 err = -ENODEV;
2232 goto exit; 2239 goto exit;
2233 } 2240 }
2234 *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ 2241 *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */
@@ -2239,7 +2246,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2239 (int)superio_inb(sioaddr, SIO_REG_DEVREV)); 2246 (int)superio_inb(sioaddr, SIO_REG_DEVREV));
2240exit: 2247exit:
2241 superio_exit(sioaddr); 2248 superio_exit(sioaddr);
2242 release_region(sioaddr, 2);
2243 return err; 2249 return err;
2244} 2250}
2245 2251
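In f71882fg, superio_enter() now owns the I/O region: it claims it with request_muxed_region(), can fail with -EBUSY, and superio_exit() releases it again, so f71882fg_find() no longer requests the region itself. A small sketch of that acquire/use/release pairing, with a flag standing in for the region bookkeeping:

#include <stdio.h>

static int region_busy;                 /* request_muxed_region() stand-in */

static int superio_enter_example(int base)
{
    if (region_busy)
        return -1;                      /* -EBUSY in the driver */
    region_busy = 1;
    printf("Super-I/O at 0x%04x unlocked\n", base);
    return 0;
}

static void superio_exit_example(int base)
{
    printf("Super-I/O at 0x%04x locked again\n", base);
    region_busy = 0;                    /* release_region() analogue */
}

int main(void)
{
    if (superio_enter_example(0x2e))
        return 1;
    /* ... read the device ID, base address, etc. ... */
    superio_exit_example(0x2e);
    return 0;
}
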
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 0f58ecc5334d..9638d58f99fd 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -79,7 +79,7 @@ enum chips { f75373, f75375 };
79#define F75375_REG_PWM2_DROP_DUTY 0x6C 79#define F75375_REG_PWM2_DROP_DUTY 0x6C
80 80
81#define FAN_CTRL_LINEAR(nr) (4 + nr) 81#define FAN_CTRL_LINEAR(nr) (4 + nr)
82#define FAN_CTRL_MODE(nr) (5 + ((nr) * 2)) 82#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2))
83 83
84/* 84/*
85 * Data structures and manipulation thereof 85 * Data structures and manipulation thereof
@@ -298,7 +298,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
298 return -EINVAL; 298 return -EINVAL;
299 299
300 fanmode = f75375_read8(client, F75375_REG_FAN_TIMER); 300 fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
301 fanmode = ~(3 << FAN_CTRL_MODE(nr)); 301 fanmode &= ~(3 << FAN_CTRL_MODE(nr));
302 302
303 switch (val) { 303 switch (val) {
304 case 0: /* Full speed */ 304 case 0: /* Full speed */
@@ -350,7 +350,7 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
350 350
351 mutex_lock(&data->update_lock); 351 mutex_lock(&data->update_lock);
352 conf = f75375_read8(client, F75375_REG_CONFIG1); 352 conf = f75375_read8(client, F75375_REG_CONFIG1);
353 conf = ~(1 << FAN_CTRL_LINEAR(nr)); 353 conf &= ~(1 << FAN_CTRL_LINEAR(nr));
354 354
355 if (val == 0) 355 if (val == 0)
356 conf |= (1 << FAN_CTRL_LINEAR(nr)) ; 356 conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
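The f75375s fix replaces plain assignments with read-modify-write: only the intended bits are cleared before the new value is ORed in, instead of overwriting the whole register with an inverted mask (the FAN_CTRL_MODE shift is also corrected from 5 to 4). The effect in isolation:

#include <stdio.h>
#include <stdint.h>

#define FAN_CTRL_MODE(nr)   (4 + ((nr) * 2))

int main(void)
{
    uint8_t fanmode = 0xd5;     /* pretend register read-back */
    int nr = 1, val = 2;        /* select mode 2 for fan 1 */

    fanmode &= ~(3 << FAN_CTRL_MODE(nr));           /* clear bits 7:6 only */
    fanmode |= (uint8_t)(val << FAN_CTRL_MODE(nr));

    printf("0x%02x\n", fanmode);    /* 0x95: the low bits survived */
    return 0;
}
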
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index 7580f55e67e3..36e957532230 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
221 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), 221 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
222 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), 222 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
223 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), 223 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
224 AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
225 AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
224 { NULL, } 226 { NULL, }
225/* Laptop models without axis info (yet): 227/* Laptop models without axis info (yet):
226 * "NC6910" "HP Compaq 6910" 228 * "NC6910" "HP Compaq 6910"
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index b9bb3e0ca530..39ead2a4d3c5 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -143,6 +143,37 @@ static const struct pci_device_id k8temp_ids[] = {
143 143
144MODULE_DEVICE_TABLE(pci, k8temp_ids); 144MODULE_DEVICE_TABLE(pci, k8temp_ids);
145 145
146static int __devinit is_rev_g_desktop(u8 model)
147{
148 u32 brandidx;
149
150 if (model < 0x69)
151 return 0;
152
153 if (model == 0xc1 || model == 0x6c || model == 0x7c)
154 return 0;
155
156 /*
157 * Differentiate between AM2 and ASB1.
158 * See "Constructing the processor Name String" in "Revision
159 * Guide for AMD NPT Family 0Fh Processors" (33610).
160 */
161 brandidx = cpuid_ebx(0x80000001);
162 brandidx = (brandidx >> 9) & 0x1f;
163
164 /* Single core */
165 if ((model == 0x6f || model == 0x7f) &&
166 (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc))
167 return 0;
168
169 /* Dual core */
170 if (model == 0x6b &&
171 (brandidx == 0xb || brandidx == 0xc))
172 return 0;
173
174 return 1;
175}
176
146static int __devinit k8temp_probe(struct pci_dev *pdev, 177static int __devinit k8temp_probe(struct pci_dev *pdev,
147 const struct pci_device_id *id) 178 const struct pci_device_id *id)
148{ 179{
@@ -179,9 +210,7 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
179 "wrong - check erratum #141\n"); 210 "wrong - check erratum #141\n");
180 } 211 }
181 212
182 if ((model >= 0x69) && 213 if (is_rev_g_desktop(model)) {
183 !(model == 0xc1 || model == 0x6c || model == 0x7c ||
184 model == 0x6b || model == 0x6f || model == 0x7f)) {
185 /* 214 /*
186 * RevG desktop CPUs (i.e. no socket S1G1 or 215 * RevG desktop CPUs (i.e. no socket S1G1 or
187 * ASB1 parts) need additional offset, 216 * ASB1 parts) need additional offset,
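k8temp folds the RevG-desktop test into is_rev_g_desktop() and, per the comment in the hunk, uses the brand ID from CPUID 0x80000001 (EBX bits 13:9) to tell AM2 desktop parts from ASB1 ones. Reading that field from userspace looks roughly like this (x86 only, purely illustrative):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
        return 1;
    printf("brand index: 0x%x\n", (ebx >> 9) & 0x1f);
    return 0;
}
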
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 6138f036b159..fc591ae53107 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
277 wake_up_interruptible(&lis3_dev.misc_wait); 277 wake_up_interruptible(&lis3_dev.misc_wait);
278 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); 278 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
279out: 279out:
280 if (lis3_dev.whoami == WAI_8B && lis3_dev.idev && 280 if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
281 lis3_dev.idev->input->users) 281 lis3_dev.idev->input->users)
282 return IRQ_WAKE_THREAD; 282 return IRQ_WAKE_THREAD;
283 return IRQ_HANDLED; 283 return IRQ_HANDLED;
@@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
718 * io-apic is not configurable (and generates a warning) but I keep it 718 * io-apic is not configurable (and generates a warning) but I keep it
719 * in case of support for other hardware. 719 * in case of support for other hardware.
720 */ 720 */
721 if (dev->whoami == WAI_8B) 721 if (dev->pdata && dev->whoami == WAI_8B)
722 thread_fn = lis302dl_interrupt_thread1_8b; 722 thread_fn = lis302dl_interrupt_thread1_8b;
723 else 723 else
724 thread_fn = NULL; 724 thread_fn = NULL;
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c
index dc1f5402c1d7..8e5933b72d19 100644
--- a/drivers/hwmon/lis3lv02d_i2c.c
+++ b/drivers/hwmon/lis3lv02d_i2c.c
@@ -121,7 +121,7 @@ static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
121{ 121{
122 struct lis3lv02d *lis3 = i2c_get_clientdata(client); 122 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
123 123
124 if (!lis3->pdata->wakeup_flags) 124 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
125 lis3lv02d_poweroff(lis3); 125 lis3lv02d_poweroff(lis3);
126 return 0; 126 return 0;
127} 127}
@@ -130,7 +130,7 @@ static int lis3lv02d_i2c_resume(struct i2c_client *client)
130{ 130{
131 struct lis3lv02d *lis3 = i2c_get_clientdata(client); 131 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
132 132
133 if (!lis3->pdata->wakeup_flags) 133 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
134 lis3lv02d_poweron(lis3); 134 lis3lv02d_poweron(lis3);
135 return 0; 135 return 0;
136} 136}
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index 82b16808a274..b9be5e3a22b3 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -92,7 +92,7 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
92{ 92{
93 struct lis3lv02d *lis3 = spi_get_drvdata(spi); 93 struct lis3lv02d *lis3 = spi_get_drvdata(spi);
94 94
95 if (!lis3->pdata->wakeup_flags) 95 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
96 lis3lv02d_poweroff(&lis3_dev); 96 lis3lv02d_poweroff(&lis3_dev);
97 97
98 return 0; 98 return 0;
@@ -102,7 +102,7 @@ static int lis3lv02d_spi_resume(struct spi_device *spi)
102{ 102{
103 struct lis3lv02d *lis3 = spi_get_drvdata(spi); 103 struct lis3lv02d *lis3 = spi_get_drvdata(spi);
104 104
105 if (!lis3->pdata->wakeup_flags) 105 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
106 lis3lv02d_poweron(lis3); 106 lis3lv02d_poweron(lis3);
107 107
108 return 0; 108 return 0;
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 94741d42112d..464340f25496 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -91,7 +91,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev);
91struct lm95241_data { 91struct lm95241_data {
92 struct device *hwmon_dev; 92 struct device *hwmon_dev;
93 struct mutex update_lock; 93 struct mutex update_lock;
94 unsigned long last_updated, rate; /* in jiffies */ 94 unsigned long last_updated, interval; /* in jiffies */
95 char valid; /* zero until following fields are valid */ 95 char valid; /* zero until following fields are valid */
96 /* registers values */ 96 /* registers values */
97 u8 local_h, local_l; /* local */ 97 u8 local_h, local_l; /* local */
@@ -114,23 +114,23 @@ show_temp(local);
114show_temp(remote1); 114show_temp(remote1);
115show_temp(remote2); 115show_temp(remote2);
116 116
117static ssize_t show_rate(struct device *dev, struct device_attribute *attr, 117static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
118 char *buf) 118 char *buf)
119{ 119{
120 struct lm95241_data *data = lm95241_update_device(dev); 120 struct lm95241_data *data = lm95241_update_device(dev);
121 121
122 snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->rate / HZ); 122 snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval / HZ);
123 return strlen(buf); 123 return strlen(buf);
124} 124}
125 125
126static ssize_t set_rate(struct device *dev, struct device_attribute *attr, 126static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
127 const char *buf, size_t count) 127 const char *buf, size_t count)
128{ 128{
129 struct i2c_client *client = to_i2c_client(dev); 129 struct i2c_client *client = to_i2c_client(dev);
130 struct lm95241_data *data = i2c_get_clientdata(client); 130 struct lm95241_data *data = i2c_get_clientdata(client);
131 131
132 strict_strtol(buf, 10, &data->rate); 132 strict_strtol(buf, 10, &data->interval);
133 data->rate = data->rate * HZ / 1000; 133 data->interval = data->interval * HZ / 1000;
134 134
135 return count; 135 return count;
136} 136}
@@ -286,7 +286,8 @@ static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min1, set_min1);
286static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2); 286static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2);
287static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1); 287static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1);
288static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2); 288static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2);
289static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO, show_rate, set_rate); 289static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
290 set_interval);
290 291
291static struct attribute *lm95241_attributes[] = { 292static struct attribute *lm95241_attributes[] = {
292 &dev_attr_temp1_input.attr, 293 &dev_attr_temp1_input.attr,
@@ -298,7 +299,7 @@ static struct attribute *lm95241_attributes[] = {
298 &dev_attr_temp3_min.attr, 299 &dev_attr_temp3_min.attr,
299 &dev_attr_temp2_max.attr, 300 &dev_attr_temp2_max.attr,
300 &dev_attr_temp3_max.attr, 301 &dev_attr_temp3_max.attr,
301 &dev_attr_rate.attr, 302 &dev_attr_update_interval.attr,
302 NULL 303 NULL
303}; 304};
304 305
@@ -376,7 +377,7 @@ static void lm95241_init_client(struct i2c_client *client)
376{ 377{
377 struct lm95241_data *data = i2c_get_clientdata(client); 378 struct lm95241_data *data = i2c_get_clientdata(client);
378 379
379 data->rate = HZ; /* 1 sec default */ 380 data->interval = HZ; /* 1 sec default */
380 data->valid = 0; 381 data->valid = 0;
381 data->config = CFG_CR0076; 382 data->config = CFG_CR0076;
382 data->model = 0; 383 data->model = 0;
@@ -410,7 +411,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
410 411
411 mutex_lock(&data->update_lock); 412 mutex_lock(&data->update_lock);
412 413
413 if (time_after(jiffies, data->last_updated + data->rate) || 414 if (time_after(jiffies, data->last_updated + data->interval) ||
414 !data->valid) { 415 !data->valid) {
415 dev_dbg(&client->dev, "Updating lm95241 data.\n"); 416 dev_dbg(&client->dev, "Updating lm95241 data.\n");
416 data->local_h = 417 data->local_h =
diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c
index 74157fcda6ed..f11903936c8b 100644
--- a/drivers/hwmon/pkgtemp.c
+++ b/drivers/hwmon/pkgtemp.c
@@ -33,7 +33,6 @@
33#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <linux/cpu.h> 35#include <linux/cpu.h>
36#include <linux/pci.h>
37#include <asm/msr.h> 36#include <asm/msr.h>
38#include <asm/processor.h> 37#include <asm/processor.h>
39 38
@@ -224,7 +223,7 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
224 223
225 err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group); 224 err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group);
226 if (err) 225 if (err)
227 goto exit_free; 226 goto exit_dev;
228 227
229 data->hwmon_dev = hwmon_device_register(&pdev->dev); 228 data->hwmon_dev = hwmon_device_register(&pdev->dev);
230 if (IS_ERR(data->hwmon_dev)) { 229 if (IS_ERR(data->hwmon_dev)) {
@@ -238,6 +237,8 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
238 237
239exit_class: 238exit_class:
240 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); 239 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
240exit_dev:
241 device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
241exit_free: 242exit_free:
242 kfree(data); 243 kfree(data);
243exit: 244exit:
@@ -250,6 +251,7 @@ static int __devexit pkgtemp_remove(struct platform_device *pdev)
250 251
251 hwmon_device_unregister(data->hwmon_dev); 252 hwmon_device_unregister(data->hwmon_dev);
252 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); 253 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
254 device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
253 platform_set_drvdata(pdev, NULL); 255 platform_set_drvdata(pdev, NULL);
254 kfree(data); 256 kfree(data);
255 return 0; 257 return 0;
@@ -281,9 +283,10 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu)
281 int err; 283 int err;
282 struct platform_device *pdev; 284 struct platform_device *pdev;
283 struct pdev_entry *pdev_entry; 285 struct pdev_entry *pdev_entry;
284#ifdef CONFIG_SMP
285 struct cpuinfo_x86 *c = &cpu_data(cpu); 286 struct cpuinfo_x86 *c = &cpu_data(cpu);
286#endif 287
288 if (!cpu_has(c, X86_FEATURE_PTS))
289 return 0;
287 290
288 mutex_lock(&pdev_list_mutex); 291 mutex_lock(&pdev_list_mutex);
289 292
@@ -339,17 +342,18 @@ exit:
339#ifdef CONFIG_HOTPLUG_CPU 342#ifdef CONFIG_HOTPLUG_CPU
340static void pkgtemp_device_remove(unsigned int cpu) 343static void pkgtemp_device_remove(unsigned int cpu)
341{ 344{
342 struct pdev_entry *p, *n; 345 struct pdev_entry *p;
343 unsigned int i; 346 unsigned int i;
344 int err; 347 int err;
345 348
346 mutex_lock(&pdev_list_mutex); 349 mutex_lock(&pdev_list_mutex);
347 list_for_each_entry_safe(p, n, &pdev_list, list) { 350 list_for_each_entry(p, &pdev_list, list) {
348 if (p->cpu != cpu) 351 if (p->cpu != cpu)
349 continue; 352 continue;
350 353
351 platform_device_unregister(p->pdev); 354 platform_device_unregister(p->pdev);
352 list_del(&p->list); 355 list_del(&p->list);
356 mutex_unlock(&pdev_list_mutex);
353 kfree(p); 357 kfree(p);
354 for_each_cpu(i, cpu_core_mask(cpu)) { 358 for_each_cpu(i, cpu_core_mask(cpu)) {
355 if (i != cpu) { 359 if (i != cpu) {
@@ -358,7 +362,7 @@ static void pkgtemp_device_remove(unsigned int cpu)
358 break; 362 break;
359 } 363 }
360 } 364 }
361 break; 365 return;
362 } 366 }
363 mutex_unlock(&pdev_list_mutex); 367 mutex_unlock(&pdev_list_mutex);
364} 368}
@@ -399,11 +403,6 @@ static int __init pkgtemp_init(void)
399 goto exit; 403 goto exit;
400 404
401 for_each_online_cpu(i) { 405 for_each_online_cpu(i) {
402 struct cpuinfo_x86 *c = &cpu_data(i);
403
404 if (!cpu_has(c, X86_FEATURE_PTS))
405 continue;
406
407 err = pkgtemp_device_add(i); 406 err = pkgtemp_device_add(i);
408 if (err) 407 if (err)
409 goto exit_devices_unreg; 408 goto exit_devices_unreg;
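Like coretemp, pkgtemp moves the feature check (X86_FEATURE_PTS) into the add path and reworks removal so the matching entry is unlinked under the lock, the lock is dropped, and only then is the entry freed and the sensor re-added on a sibling core. A loose userspace sketch of that unlink-then-free ordering, with pthreads standing in for the kernel mutex (compile with -pthread):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct entry { int cpu; struct entry *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *head;

static void remove_cpu(int cpu)
{
    struct entry **pp, *victim = NULL;

    pthread_mutex_lock(&list_lock);
    for (pp = &head; *pp; pp = &(*pp)->next) {
        if ((*pp)->cpu == cpu) {
            victim = *pp;
            *pp = victim->next;         /* list_del() analogue */
            break;
        }
    }
    pthread_mutex_unlock(&list_lock);   /* unlock before freeing */

    if (victim) {
        free(victim);
        printf("cpu %d gone; a sibling would take over here\n", cpu);
    }
}

int main(void)
{
    struct entry *e = malloc(sizeof(*e));

    e->cpu = 2;
    e->next = NULL;
    head = e;
    remove_cpu(2);
    return 0;
}
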
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index e96e69dd36fb..072c58008a63 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -127,6 +127,7 @@ superio_enter(int ioreg)
127static inline void 127static inline void
128superio_exit(int ioreg) 128superio_exit(int ioreg)
129{ 129{
130 outb(0xaa, ioreg);
130 outb(0x02, ioreg); 131 outb(0x02, ioreg);
131 outb(0x02, ioreg + 1); 132 outb(0x02, ioreg + 1);
132} 133}
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index f7bd2613cecc..f2de3be35df3 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -677,6 +677,11 @@ static int __devinit cpm_i2c_probe(struct platform_device *ofdev,
677 dev_dbg(&ofdev->dev, "hw routines for %s registered.\n", 677 dev_dbg(&ofdev->dev, "hw routines for %s registered.\n",
678 cpm->adap.name); 678 cpm->adap.name);
679 679
680 /*
681 * register OF I2C devices
682 */
683 of_i2c_register_devices(&cpm->adap);
684
680 return 0; 685 return 0;
681out_shut: 686out_shut:
682 cpm_i2c_shutdown(cpm); 687 cpm_i2c_shutdown(cpm);
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 2222c87876b9..b8feac5f2ef4 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -357,9 +357,6 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
357 357
358 dev->terminate = 0; 358 dev->terminate = 0;
359 359
360 /* write the data into mode register */
361 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
362
363 /* 360 /*
364 * First byte should be set here, not after interrupt, 361 * First byte should be set here, not after interrupt,
365 * because transmit-data-ready interrupt can come before 362 * because transmit-data-ready interrupt can come before
@@ -371,6 +368,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
371 dev->buf_len--; 368 dev->buf_len--;
372 } 369 }
373 370
371 /* write the data into mode register; start transmitting */
372 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
373
374 r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, 374 r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
375 dev->adapter.timeout); 375 dev->adapter.timeout);
376 if (r == 0) { 376 if (r == 0) {
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 43ca32fddde2..89eedf45d30e 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -761,6 +761,9 @@ static int __devinit iic_probe(struct platform_device *ofdev,
761 dev_info(&ofdev->dev, "using %s mode\n", 761 dev_info(&ofdev->dev, "using %s mode\n",
762 dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)"); 762 dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
763 763
764 /* Now register all the child nodes */
765 of_i2c_register_devices(adap);
766
764 return 0; 767 return 0;
765 768
766error_cleanup: 769error_cleanup:
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index a1c419a716af..b74e6dc6886c 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -632,6 +632,7 @@ static int __devinit fsl_i2c_probe(struct platform_device *op,
632 dev_err(i2c->dev, "failed to add adapter\n"); 632 dev_err(i2c->dev, "failed to add adapter\n");
633 goto fail_add; 633 goto fail_add;
634 } 634 }
635 of_i2c_register_devices(&i2c->adap);
635 636
636 return result; 637 return result;
637 638
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 0e9f85d0a835..56dbe54e8811 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -218,7 +218,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
218 return result; 218 return result;
219 } else if (result == 0) { 219 } else if (result == 0) {
220 dev_dbg(i2c->dev, "%s: timeout\n", __func__); 220 dev_dbg(i2c->dev, "%s: timeout\n", __func__);
221 result = -ETIMEDOUT; 221 return -ETIMEDOUT;
222 } 222 }
223 223
224 return 0; 224 return 0;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 7674efb55378..b33c78586bfc 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -680,6 +680,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
680 680
681 if (r == 0) 681 if (r == 0)
682 r = num; 682 r = num;
683
684 omap_i2c_wait_for_bb(dev);
683out: 685out:
684 omap_i2c_idle(dev); 686 omap_i2c_idle(dev);
685 return r; 687 return r;
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index bbd77603a417..29933f87d8fa 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -71,8 +71,8 @@ static int pca_isa_readbyte(void *pd, int reg)
71 71
72static int pca_isa_waitforcompletion(void *pd) 72static int pca_isa_waitforcompletion(void *pd)
73{ 73{
74 long ret = ~0;
75 unsigned long timeout; 74 unsigned long timeout;
75 long ret;
76 76
77 if (irq > -1) { 77 if (irq > -1) {
78 ret = wait_event_timeout(pca_wait, 78 ret = wait_event_timeout(pca_wait,
@@ -81,11 +81,15 @@ static int pca_isa_waitforcompletion(void *pd)
81 } else { 81 } else {
82 /* Do polling */ 82 /* Do polling */
83 timeout = jiffies + pca_isa_ops.timeout; 83 timeout = jiffies + pca_isa_ops.timeout;
84 while (((pca_isa_readbyte(pd, I2C_PCA_CON) 84 do {
85 & I2C_PCA_CON_SI) == 0) 85 ret = time_before(jiffies, timeout);
86 && (ret = time_before(jiffies, timeout))) 86 if (pca_isa_readbyte(pd, I2C_PCA_CON)
87 & I2C_PCA_CON_SI)
88 break;
87 udelay(100); 89 udelay(100);
90 } while (ret);
88 } 91 }
92
89 return ret > 0; 93 return ret > 0;
90} 94}
91 95
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index ef5c78487eb7..5f6d7f89e225 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -80,8 +80,8 @@ static void i2c_pca_pf_writebyte32(void *pd, int reg, int val)
80static int i2c_pca_pf_waitforcompletion(void *pd) 80static int i2c_pca_pf_waitforcompletion(void *pd)
81{ 81{
82 struct i2c_pca_pf_data *i2c = pd; 82 struct i2c_pca_pf_data *i2c = pd;
83 long ret = ~0;
84 unsigned long timeout; 83 unsigned long timeout;
84 long ret;
85 85
86 if (i2c->irq) { 86 if (i2c->irq) {
87 ret = wait_event_timeout(i2c->wait, 87 ret = wait_event_timeout(i2c->wait,
@@ -90,10 +90,13 @@ static int i2c_pca_pf_waitforcompletion(void *pd)
90 } else { 90 } else {
91 /* Do polling */ 91 /* Do polling */
92 timeout = jiffies + i2c->adap.timeout; 92 timeout = jiffies + i2c->adap.timeout;
93 while (((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) 93 do {
94 & I2C_PCA_CON_SI) == 0) 94 ret = time_before(jiffies, timeout);
95 && (ret = time_before(jiffies, timeout))) 95 if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
96 & I2C_PCA_CON_SI)
97 break;
96 udelay(100); 98 udelay(100);
99 } while (ret);
97 } 100 }
98 101
99 return ret > 0; 102 return ret > 0;
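Both PCA wait loops had the same flaw: ret started out as ~0, which is -1 as a signed long, so if the SI bit was already set on the first poll the function returned ret > 0 == false even though the transfer had completed. The rewrite records the timeout status on each pass and breaks out as soon as SI is seen. A standalone sketch of the corrected shape, with wall-clock time standing in for jiffies:

#include <stdio.h>
#include <time.h>

static int completion_bit_set(int pass) { return pass >= 3; }   /* SI stand-in */

static int wait_for_completion_poll(void)
{
    time_t timeout = time(NULL) + 2;    /* jiffies + adap.timeout analogue */
    long ret;
    int pass = 0;

    do {
        ret = time(NULL) < timeout;     /* time_before() analogue */
        if (completion_bit_set(pass++))
            break;
        /* udelay(100) in the driver */
    } while (ret);

    return ret > 0;
}

int main(void)
{
    printf("completed: %d\n", wait_for_completion_poll());
    return 0;
}
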
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 72902e0bbfa7..bf831bf81587 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -662,8 +662,8 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
662 unsigned long sda_delay; 662 unsigned long sda_delay;
663 663
664 if (pdata->sda_delay) { 664 if (pdata->sda_delay) {
665 sda_delay = (freq / 1000) * pdata->sda_delay; 665 sda_delay = clkin * pdata->sda_delay;
666 sda_delay /= 1000000; 666 sda_delay = DIV_ROUND_UP(sda_delay, 1000000);
667 sda_delay = DIV_ROUND_UP(sda_delay, 5); 667 sda_delay = DIV_ROUND_UP(sda_delay, 5);
668 if (sda_delay > 3) 668 if (sda_delay > 3)
669 sda_delay = 3; 669 sda_delay = 3;
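The s3c2410 change recomputes the SDA delay from clkin rather than the requested frequency and rounds up at each step before clamping to 3. My reading is that clkin is the I2C source clock in kHz and pdata->sda_delay is in nanoseconds; treating those units as an assumption, the arithmetic works out like this:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned long clkin = 66000;        /* kHz, assumed */
    unsigned long sda_delay_ns = 100;   /* from platform data, assumed ns */
    unsigned long val;

    val = clkin * sda_delay_ns;         /* kHz * ns */
    val = DIV_ROUND_UP(val, 1000000);   /* -> clock cycles */
    val = DIV_ROUND_UP(val, 5);         /* register counts steps of 5 */
    if (val > 3)
        val = 3;

    printf("sda delay field: %lu\n", val);  /* 2 for these inputs */
    return 0;
}
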
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 6649176de940..bea4c5021d26 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -32,7 +32,6 @@
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/idr.h> 33#include <linux/idr.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35#include <linux/of_i2c.h>
36#include <linux/of_device.h> 35#include <linux/of_device.h>
37#include <linux/completion.h> 36#include <linux/completion.h>
38#include <linux/hardirq.h> 37#include <linux/hardirq.h>
@@ -197,11 +196,12 @@ static int i2c_device_pm_suspend(struct device *dev)
197{ 196{
198 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 197 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
199 198
200 if (pm_runtime_suspended(dev)) 199 if (pm) {
201 return 0; 200 if (pm_runtime_suspended(dev))
202 201 return 0;
203 if (pm) 202 else
204 return pm->suspend ? pm->suspend(dev) : 0; 203 return pm->suspend ? pm->suspend(dev) : 0;
204 }
205 205
206 return i2c_legacy_suspend(dev, PMSG_SUSPEND); 206 return i2c_legacy_suspend(dev, PMSG_SUSPEND);
207} 207}
@@ -216,12 +216,6 @@ static int i2c_device_pm_resume(struct device *dev)
216 else 216 else
217 ret = i2c_legacy_resume(dev); 217 ret = i2c_legacy_resume(dev);
218 218
219 if (!ret) {
220 pm_runtime_disable(dev);
221 pm_runtime_set_active(dev);
222 pm_runtime_enable(dev);
223 }
224
225 return ret; 219 return ret;
226} 220}
227 221
@@ -229,11 +223,12 @@ static int i2c_device_pm_freeze(struct device *dev)
229{ 223{
230 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 224 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
231 225
232 if (pm_runtime_suspended(dev)) 226 if (pm) {
233 return 0; 227 if (pm_runtime_suspended(dev))
234 228 return 0;
235 if (pm) 229 else
236 return pm->freeze ? pm->freeze(dev) : 0; 230 return pm->freeze ? pm->freeze(dev) : 0;
231 }
237 232
238 return i2c_legacy_suspend(dev, PMSG_FREEZE); 233 return i2c_legacy_suspend(dev, PMSG_FREEZE);
239} 234}
@@ -242,11 +237,12 @@ static int i2c_device_pm_thaw(struct device *dev)
242{ 237{
243 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 238 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
244 239
245 if (pm_runtime_suspended(dev)) 240 if (pm) {
246 return 0; 241 if (pm_runtime_suspended(dev))
247 242 return 0;
248 if (pm) 243 else
249 return pm->thaw ? pm->thaw(dev) : 0; 244 return pm->thaw ? pm->thaw(dev) : 0;
245 }
250 246
251 return i2c_legacy_resume(dev); 247 return i2c_legacy_resume(dev);
252} 248}
@@ -255,11 +251,12 @@ static int i2c_device_pm_poweroff(struct device *dev)
255{ 251{
256 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 252 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
257 253
258 if (pm_runtime_suspended(dev)) 254 if (pm) {
259 return 0; 255 if (pm_runtime_suspended(dev))
260 256 return 0;
261 if (pm) 257 else
262 return pm->poweroff ? pm->poweroff(dev) : 0; 258 return pm->poweroff ? pm->poweroff(dev) : 0;
259 }
263 260
264 return i2c_legacy_suspend(dev, PMSG_HIBERNATE); 261 return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
265} 262}
@@ -876,9 +873,6 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
876 if (adap->nr < __i2c_first_dynamic_bus_num) 873 if (adap->nr < __i2c_first_dynamic_bus_num)
877 i2c_scan_static_board_info(adap); 874 i2c_scan_static_board_info(adap);
878 875
879 /* Register devices from the device tree */
880 of_i2c_register_devices(adap);
881
882 /* Notify drivers */ 876 /* Notify drivers */
883 mutex_lock(&core_lock); 877 mutex_lock(&core_lock);
884 bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_new_adapter); 878 bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_new_adapter);
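Two things happen in i2c-core.c: of_i2c_register_devices() moves out of i2c_register_adapter() and into the individual bus drivers (see the i2c-cpm, i2c-ibm_iic and i2c-mpc hunks earlier), and the dev_pm_ops dispatch is restructured so the pm_runtime_suspended() shortcut only applies when the driver actually provides pm ops; legacy drivers always get the legacy callback. A minimal sketch of that dispatch, with stand-in types:

#include <stdio.h>

struct pm_ops { int (*suspend)(void); };

static int runtime_suspended;           /* pm_runtime_suspended() stand-in */

static int legacy_suspend(void)
{
    printf("legacy suspend\n");
    return 0;
}

static int device_suspend(const struct pm_ops *pm)
{
    if (pm) {
        if (runtime_suspended)
            return 0;                   /* already suspended, nothing to do */
        return pm->suspend ? pm->suspend() : 0;
    }
    return legacy_suspend();            /* legacy path, unconditionally */
}

int main(void)
{
    struct pm_ops empty = { 0 };

    runtime_suspended = 1;
    printf("with pm ops: %d\n", device_suspend(&empty));
    printf("without:     %d\n", device_suspend(NULL));
    return 0;
}
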
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 4c3d1bfec0c5..068cef0a987a 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1448,19 +1448,13 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1448 if (hwif == NULL) 1448 if (hwif == NULL)
1449 continue; 1449 continue;
1450 1450
1451 if (hwif->present)
1452 hwif_register_devices(hwif);
1453 }
1454
1455 ide_host_for_each_port(i, hwif, host) {
1456 if (hwif == NULL)
1457 continue;
1458
1459 ide_sysfs_register_port(hwif); 1451 ide_sysfs_register_port(hwif);
1460 ide_proc_register_port(hwif); 1452 ide_proc_register_port(hwif);
1461 1453
1462 if (hwif->present) 1454 if (hwif->present) {
1463 ide_proc_port_register_devices(hwif); 1455 ide_proc_port_register_devices(hwif);
1456 hwif_register_devices(hwif);
1457 }
1464 } 1458 }
1465 1459
1466 return j ? 0 : -1; 1460 return j ? 0 : -1;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index a10152bb1427..c37ef64d1465 100755..100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -83,7 +83,7 @@ static unsigned int mwait_substates;
83/* Reliable LAPIC Timer States, bit 1 for C1 etc. */ 83/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
84static unsigned int lapic_timer_reliable_states; 84static unsigned int lapic_timer_reliable_states;
85 85
86static struct cpuidle_device *intel_idle_cpuidle_devices; 86static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
87static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); 87static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
88 88
89static struct cpuidle_state *cpuidle_state_table; 89static struct cpuidle_state *cpuidle_state_table;
@@ -108,7 +108,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
108 .name = "NHM-C3", 108 .name = "NHM-C3",
109 .desc = "MWAIT 0x10", 109 .desc = "MWAIT 0x10",
110 .driver_data = (void *) 0x10, 110 .driver_data = (void *) 0x10,
111 .flags = CPUIDLE_FLAG_TIME_VALID, 111 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
112 .exit_latency = 20, 112 .exit_latency = 20,
113 .power_usage = 500, 113 .power_usage = 500,
114 .target_residency = 80, 114 .target_residency = 80,
@@ -117,7 +117,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
117 .name = "NHM-C6", 117 .name = "NHM-C6",
118 .desc = "MWAIT 0x20", 118 .desc = "MWAIT 0x20",
119 .driver_data = (void *) 0x20, 119 .driver_data = (void *) 0x20,
120 .flags = CPUIDLE_FLAG_TIME_VALID, 120 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
121 .exit_latency = 200, 121 .exit_latency = 200,
122 .power_usage = 350, 122 .power_usage = 350,
123 .target_residency = 800, 123 .target_residency = 800,
@@ -149,7 +149,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
149 .name = "ATM-C4", 149 .name = "ATM-C4",
150 .desc = "MWAIT 0x30", 150 .desc = "MWAIT 0x30",
151 .driver_data = (void *) 0x30, 151 .driver_data = (void *) 0x30,
152 .flags = CPUIDLE_FLAG_TIME_VALID, 152 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
153 .exit_latency = 100, 153 .exit_latency = 100,
154 .power_usage = 250, 154 .power_usage = 250,
155 .target_residency = 400, 155 .target_residency = 400,
@@ -157,13 +157,13 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
157 { /* MWAIT C5 */ }, 157 { /* MWAIT C5 */ },
158 { /* MWAIT C6 */ 158 { /* MWAIT C6 */
159 .name = "ATM-C6", 159 .name = "ATM-C6",
160 .desc = "MWAIT 0x40", 160 .desc = "MWAIT 0x52",
161 .driver_data = (void *) 0x40, 161 .driver_data = (void *) 0x52,
162 .flags = CPUIDLE_FLAG_TIME_VALID, 162 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
163 .exit_latency = 200, 163 .exit_latency = 140,
164 .power_usage = 150, 164 .power_usage = 150,
165 .target_residency = 800, 165 .target_residency = 560,
166 .enter = NULL }, /* disabled */ 166 .enter = &intel_idle },
167}; 167};
168 168
169/** 169/**
@@ -185,6 +185,16 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
185 185
186 local_irq_disable(); 186 local_irq_disable();
187 187
188 /*
189 * If the state flag indicates that the TLB will be flushed or if this
190 * is the deepest c-state supported, do a voluntary leave mm to avoid
191 * costly and mostly unnecessary wakeups for flushing the user TLB's
192 * associated with the active mm.
193 */
194 if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED ||
195 (&dev->states[dev->state_count - 1] == state))
196 leave_mm(cpu);
197
188 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 198 if (!(lapic_timer_reliable_states & (1 << (cstate))))
189 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); 199 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
190 200
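The intel_idle hunks above tag the deeper Nehalem and Atom C-states with CPUIDLE_FLAG_TLB_FLUSHED and, before entering such a state (or the deepest one registered), call leave_mm() so the CPU drops the user address space voluntarily instead of being woken later just to flush its TLB. The decision itself is a simple predicate; a standalone sketch with simplified stand-ins for the cpuidle structures (the flag value here is arbitrary):

#include <stdio.h>

#define FLAG_TLB_FLUSHED    0x2     /* stand-in for CPUIDLE_FLAG_TLB_FLUSHED */

struct state { const char *name; unsigned int flags; };

static void maybe_leave_mm(const struct state *states, int count,
                           const struct state *s)
{
    if ((s->flags & FLAG_TLB_FLUSHED) || s == &states[count - 1])
        printf("%s: leave_mm() before mwait\n", s->name);
    else
        printf("%s: keep the current mm\n", s->name);
}

int main(void)
{
    struct state states[] = {
        { "C1", 0 },
        { "C3", FLAG_TLB_FLUSHED },
        { "C6", FLAG_TLB_FLUSHED },
    };
    int i;

    for (i = 0; i < 3; i++)
        maybe_leave_mm(states, 3, &states[i]);
    return 0;
}
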
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index d0dc1db80b29..50815022cff1 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -1106,7 +1106,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
1106 if (recv->block_irq_interval * 4 > iso->buf_packets) 1106 if (recv->block_irq_interval * 4 > iso->buf_packets)
1107 recv->block_irq_interval = iso->buf_packets / 4; 1107 recv->block_irq_interval = iso->buf_packets / 4;
1108 if (recv->block_irq_interval < 1) 1108 if (recv->block_irq_interval < 1)
1109 recv->block_irq_interval = 1; 1109 recv->block_irq_interval = 1;
1110 1110
1111 /* choose a buffer stride */ 1111 /* choose a buffer stride */
1112 /* must be a power of 2, and <= PAGE_SIZE */ 1112 /* must be a power of 2, and <= PAGE_SIZE */
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 8f0caf7d4482..78fbe9ffe7f0 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
53#define T3_MAX_PBL_SIZE 256 53#define T3_MAX_PBL_SIZE 256
54#define T3_MAX_RQ_SIZE 1024 54#define T3_MAX_RQ_SIZE 1024
55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) 55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
56#define T3_MAX_CQ_DEPTH 262144 56#define T3_MAX_CQ_DEPTH 65536
57#define T3_MAX_NUM_STAG (1<<15) 57#define T3_MAX_NUM_STAG (1<<15)
58#define T3_MAX_MR_SIZE 0x100000000ULL 58#define T3_MAX_MR_SIZE 0x100000000ULL
59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ 59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d88077a21994..13c88871dc3b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -463,7 +463,8 @@ static int send_connect(struct iwch_ep *ep)
463 V_MSS_IDX(mtu_idx) | 463 V_MSS_IDX(mtu_idx) |
464 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 464 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
465 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 465 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
466 opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 466 opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
467 V_CONG_CONTROL_FLAVOR(cong_flavor);
467 skb->priority = CPL_PRIORITY_SETUP; 468 skb->priority = CPL_PRIORITY_SETUP;
468 set_arp_failure_handler(skb, act_open_req_arp_failure); 469 set_arp_failure_handler(skb, act_open_req_arp_failure);
469 470
@@ -1280,7 +1281,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
1280 V_MSS_IDX(mtu_idx) | 1281 V_MSS_IDX(mtu_idx) |
1281 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 1282 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
1282 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 1283 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
1283 opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 1284 opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
1285 V_CONG_CONTROL_FLAVOR(cong_flavor);
1284 1286
1285 rpl = cplhdr(skb); 1287 rpl = cplhdr(skb);
1286 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1288 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 443cea55daac..61e0efd4ccfb 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
502static void nes_retrans_expired(struct nes_cm_node *cm_node) 502static void nes_retrans_expired(struct nes_cm_node *cm_node)
503{ 503{
504 struct iw_cm_id *cm_id = cm_node->cm_id; 504 struct iw_cm_id *cm_id = cm_node->cm_id;
505 switch (cm_node->state) { 505 enum nes_cm_node_state state = cm_node->state;
506 cm_node->state = NES_CM_STATE_CLOSED;
507 switch (state) {
506 case NES_CM_STATE_SYN_RCVD: 508 case NES_CM_STATE_SYN_RCVD:
507 case NES_CM_STATE_CLOSING: 509 case NES_CM_STATE_CLOSING:
508 rem_ref_cm_node(cm_node->cm_core, cm_node); 510 rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
511 case NES_CM_STATE_FIN_WAIT1: 513 case NES_CM_STATE_FIN_WAIT1:
512 if (cm_node->cm_id) 514 if (cm_node->cm_id)
513 cm_id->rem_ref(cm_id); 515 cm_id->rem_ref(cm_id);
514 cm_node->state = NES_CM_STATE_CLOSED;
515 send_reset(cm_node, NULL); 516 send_reset(cm_node, NULL);
516 break; 517 break;
517 default: 518 default:
@@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1439 break; 1440 break;
1440 case NES_CM_STATE_MPAREQ_RCVD: 1441 case NES_CM_STATE_MPAREQ_RCVD:
1441 passive_state = atomic_add_return(1, &cm_node->passive_state); 1442 passive_state = atomic_add_return(1, &cm_node->passive_state);
1442 if (passive_state == NES_SEND_RESET_EVENT)
1443 create_event(cm_node, NES_CM_EVENT_RESET);
1444 cm_node->state = NES_CM_STATE_CLOSED;
1445 dev_kfree_skb_any(skb); 1443 dev_kfree_skb_any(skb);
1446 break; 1444 break;
1447 case NES_CM_STATE_ESTABLISHED: 1445 case NES_CM_STATE_ESTABLISHED:
@@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1456 case NES_CM_STATE_CLOSED: 1454 case NES_CM_STATE_CLOSED:
1457 drop_packet(skb); 1455 drop_packet(skb);
1458 break; 1456 break;
1457 case NES_CM_STATE_FIN_WAIT2:
1459 case NES_CM_STATE_FIN_WAIT1: 1458 case NES_CM_STATE_FIN_WAIT1:
1460 case NES_CM_STATE_LAST_ACK: 1459 case NES_CM_STATE_LAST_ACK:
1461 cm_node->cm_id->rem_ref(cm_node->cm_id); 1460 cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -2777,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2777 return -EINVAL; 2776 return -EINVAL;
2778 } 2777 }
2779 2778
2779 passive_state = atomic_add_return(1, &cm_node->passive_state);
2780 if (passive_state == NES_SEND_RESET_EVENT) {
2781 rem_ref_cm_node(cm_node->cm_core, cm_node);
2782 return -ECONNRESET;
2783 }
2784
2780 /* associate the node with the QP */ 2785 /* associate the node with the QP */
2781 nesqp->cm_node = (void *)cm_node; 2786 nesqp->cm_node = (void *)cm_node;
2782 cm_node->nesqp = nesqp; 2787 cm_node->nesqp = nesqp;
@@ -2979,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2979 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " 2984 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
2980 "ret=%d\n", __func__, __LINE__, ret); 2985 "ret=%d\n", __func__, __LINE__, ret);
2981 2986
2982 passive_state = atomic_add_return(1, &cm_node->passive_state);
2983 if (passive_state == NES_SEND_RESET_EVENT)
2984 create_event(cm_node, NES_CM_EVENT_RESET);
2985 return 0; 2987 return 0;
2986} 2988}
2987 2989
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index f8233c851c69..1980a461c499 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3468,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3468 return; /* Ignore it, wait for close complete */ 3468 return; /* Ignore it, wait for close complete */
3469 3469
3470 if (atomic_inc_return(&nesqp->close_timer_started) == 1) { 3470 if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
3471 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
3472 (nesqp->ibqp_state == IB_QPS_RTS) &&
3473 ((nesadapter->eeprom_version >> 16) != NES_A0)) {
3474 spin_lock_irqsave(&nesqp->lock, flags);
3475 nesqp->hw_iwarp_state = iwarp_state;
3476 nesqp->hw_tcp_state = tcp_state;
3477 nesqp->last_aeq = async_event_id;
3478 next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
3479 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
3480 spin_unlock_irqrestore(&nesqp->lock, flags);
3481 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
3482 nes_cm_disconn(nesqp);
3483 }
3471 nesqp->cm_id->add_ref(nesqp->cm_id); 3484 nesqp->cm_id->add_ref(nesqp->cm_id);
3472 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, 3485 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
3473 NES_TIMER_TYPE_CLOSE, 1, 0); 3486 NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3477,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3477 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), 3490 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
3478 async_event_id, nesqp->last_aeq, tcp_state); 3491 async_event_id, nesqp->last_aeq, tcp_state);
3479 } 3492 }
3480
3481 break; 3493 break;
3482 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: 3494 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
3483 if (nesqp->term_flags) { 3495 if (nesqp->term_flags) {
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index aa9183db32b1..1204c3432b63 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -45,6 +45,7 @@
45#define NES_PHY_TYPE_KR 9 45#define NES_PHY_TYPE_KR 9
46 46
47#define NES_MULTICAST_PF_MAX 8 47#define NES_MULTICAST_PF_MAX 8
48#define NES_A0 3
48 49
49enum pci_regs { 50enum pci_regs {
50 NES_INT_STAT = 0x0000, 51 NES_INT_STAT = 0x0000,
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 6dfdd49cdbcf..10560c796fd6 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
1446 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); 1446 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
1447 u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; 1447 u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
1448 nes_write_indexed(nesdev, 1448 nes_write_indexed(nesdev,
1449 NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); 1449 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
1450 nesdev->disable_tx_flow_control = 0; 1450 nesdev->disable_tx_flow_control = 0;
1451 } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) { 1451 } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
1452 u32temp = nes_read_indexed(nesdev, 1452 u32temp = nes_read_indexed(nesdev,
1453 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); 1453 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
1454 u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; 1454 u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
1455 nes_write_indexed(nesdev, 1455 nes_write_indexed(nesdev,
1456 NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); 1456 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
1457 nesdev->disable_tx_flow_control = 1; 1457 nesdev->disable_tx_flow_control = 1;
1458 } 1458 }
1459 if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) { 1459 if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
diff --git a/drivers/input/input.c b/drivers/input/input.c
index a9b025f4147a..ab6982056518 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1599,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device);
1599 * @dev: input device supporting MT events and finger tracking 1599 * @dev: input device supporting MT events and finger tracking
1600 * @num_slots: number of slots used by the device 1600 * @num_slots: number of slots used by the device
1601 * 1601 *
1602 * This function allocates all necessary memory for MT slot handling 1602 * This function allocates all necessary memory for MT slot handling in the
1603 * in the input device, and adds ABS_MT_SLOT to the device capabilities. 1603 * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
 1604 * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
1604 */ 1605 */
1605int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots) 1606int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
1606{ 1607{
1608 int i;
1609
1607 if (!num_slots) 1610 if (!num_slots)
1608 return 0; 1611 return 0;
1609 1612
@@ -1614,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
1614 dev->mtsize = num_slots; 1617 dev->mtsize = num_slots;
1615 input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0); 1618 input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
1616 1619
1620 /* Mark slots as 'unused' */
1621 for (i = 0; i < num_slots; i++)
1622 dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
1623
1617 return 0; 1624 return 0;
1618} 1625}
1619EXPORT_SYMBOL(input_mt_create_slots); 1626EXPORT_SYMBOL(input_mt_create_slots);
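A short sketch of what the -1 initialisation buys a consumer, assuming the same mt[]/abs[] layout as the code above; slot_in_use() is a hypothetical helper, not part of the patch:

	/* A slot whose ABS_MT_TRACKING_ID is still -1 has never been claimed
	 * by a contact, so handlers can skip it when scanning dev->mt[]. */
	static bool slot_in_use(const struct input_dev *dev, unsigned int slot)
	{
		return dev->mt[slot].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] >= 0;
	}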
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index d85bd8a7967d..22239e988498 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -483,6 +483,9 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
483 483
484 memcpy(joydev->abspam, abspam, len); 484 memcpy(joydev->abspam, abspam, len);
485 485
486 for (i = 0; i < joydev->nabs; i++)
487 joydev->absmap[joydev->abspam[i]] = i;
488
486 out: 489 out:
487 kfree(abspam); 490 kfree(abspam);
488 return retval; 491 return retval;
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index dcc86b97a153..19fa94af207a 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -232,13 +232,13 @@ static void hil_dev_handle_ptr_events(struct hil_dev *ptr)
232 if (absdev) { 232 if (absdev) {
233 val = lo + (hi << 8); 233 val = lo + (hi << 8);
234#ifdef TABLET_AUTOADJUST 234#ifdef TABLET_AUTOADJUST
235 if (val < input_abs_min(dev, ABS_X + i)) 235 if (val < input_abs_get_min(dev, ABS_X + i))
236 input_abs_set_min(dev, ABS_X + i, val); 236 input_abs_set_min(dev, ABS_X + i, val);
237 if (val > input_abs_max(dev, ABS_X + i)) 237 if (val > input_abs_get_max(dev, ABS_X + i))
238 input_abs_set_max(dev, ABS_X + i, val); 238 input_abs_set_max(dev, ABS_X + i, val);
239#endif 239#endif
240 if (i % 3) 240 if (i % 3)
241 val = input_abs_max(dev, ABS_X + i) - val; 241 val = input_abs_get_max(dev, ABS_X + i) - val;
242 input_report_abs(dev, ABS_X + i, val); 242 input_report_abs(dev, ABS_X + i, val);
243 } else { 243 } else {
244 val = (int) (((int8_t) lo) | ((int8_t) hi << 8)); 244 val = (int) (((int8_t) lo) | ((int8_t) hi << 8));
@@ -388,11 +388,11 @@ static void hil_dev_pointer_setup(struct hil_dev *ptr)
388 388
389#ifdef TABLET_AUTOADJUST 389#ifdef TABLET_AUTOADJUST
390 for (i = 0; i < ABS_MAX; i++) { 390 for (i = 0; i < ABS_MAX; i++) {
391 int diff = input_abs_max(input_dev, ABS_X + i) / 10; 391 int diff = input_abs_get_max(input_dev, ABS_X + i) / 10;
392 input_abs_set_min(input_dev, ABS_X + i, 392 input_abs_set_min(input_dev, ABS_X + i,
393 input_abs_min(input_dev, ABS_X + i) + diff) 393 input_abs_get_min(input_dev, ABS_X + i) + diff);
394 input_abs_set_max(input_dev, ABS_X + i, 394 input_abs_set_max(input_dev, ABS_X + i,
395 input_abs_max(input_dev, ABS_X + i) - diff) 395 input_abs_get_max(input_dev, ABS_X + i) - diff);
396 } 396 }
397#endif 397#endif
398 398
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 0e53b3bc39af..f32404f99189 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -567,8 +567,6 @@ static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
567 clk_put(keypad->clk); 567 clk_put(keypad->clk);
568 568
569 input_unregister_device(keypad->input_dev); 569 input_unregister_device(keypad->input_dev);
570 input_free_device(keypad->input_dev);
571
572 iounmap(keypad->mmio_base); 570 iounmap(keypad->mmio_base);
573 571
574 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 572 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index bb53fd33cd1c..360698553eb5 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -404,6 +404,13 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
404 retval = uinput_validate_absbits(dev); 404 retval = uinput_validate_absbits(dev);
405 if (retval < 0) 405 if (retval < 0)
406 goto exit; 406 goto exit;
407 if (test_bit(ABS_MT_SLOT, dev->absbit)) {
408 int nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
409 input_mt_create_slots(dev, nslot);
410 input_set_events_per_packet(dev, 6 * nslot);
411 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
412 input_set_events_per_packet(dev, 60);
413 }
407 } 414 }
408 415
409 udev->state = UIST_SETUP_COMPLETE; 416 udev->state = UIST_SETUP_COMPLETE;
@@ -811,6 +818,8 @@ static struct miscdevice uinput_misc = {
811 .minor = UINPUT_MINOR, 818 .minor = UINPUT_MINOR,
812 .name = UINPUT_NAME, 819 .name = UINPUT_NAME,
813}; 820};
821MODULE_ALIAS_MISCDEV(UINPUT_MINOR);
822MODULE_ALIAS("devname:" UINPUT_NAME);
814 823
815static int __init uinput_init(void) 824static int __init uinput_init(void)
816{ 825{
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index ea67c49146a3..b95231763911 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input,
337 const struct bcm5974_config *cfg, 337 const struct bcm5974_config *cfg,
338 const struct tp_finger *f) 338 const struct tp_finger *f)
339{ 339{
340 input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major)); 340 input_report_abs(input, ABS_MT_TOUCH_MAJOR,
341 input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor)); 341 raw2int(f->force_major) << 1);
342 input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major)); 342 input_report_abs(input, ABS_MT_TOUCH_MINOR,
343 input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor)); 343 raw2int(f->force_minor) << 1);
344 input_report_abs(input, ABS_MT_WIDTH_MAJOR,
345 raw2int(f->size_major) << 1);
346 input_report_abs(input, ABS_MT_WIDTH_MINOR,
347 raw2int(f->size_minor) << 1);
344 input_report_abs(input, ABS_MT_ORIENTATION, 348 input_report_abs(input, ABS_MT_ORIENTATION,
345 MAX_FINGER_ORIENTATION - raw2int(f->orientation)); 349 MAX_FINGER_ORIENTATION - raw2int(f->orientation));
346 input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x)); 350 input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 83c24cca234a..d528a2dba064 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -138,8 +138,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
138 138
139 fx(0) = value; 139 fx(0) = value;
140 if (mousedev->touch && mousedev->pkt_count >= 2) { 140 if (mousedev->touch && mousedev->pkt_count >= 2) {
141 size = input_abs_get_min(dev, ABS_X) - 141 size = input_abs_get_max(dev, ABS_X) -
142 input_abs_get_max(dev, ABS_X); 142 input_abs_get_min(dev, ABS_X);
143 if (size == 0) 143 if (size == 0)
144 size = 256 * 2; 144 size = 256 * 2;
145 145
@@ -155,8 +155,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
155 fy(0) = value; 155 fy(0) = value;
156 if (mousedev->touch && mousedev->pkt_count >= 2) { 156 if (mousedev->touch && mousedev->pkt_count >= 2) {
157 /* use X size for ABS_Y to keep the same scale */ 157 /* use X size for ABS_Y to keep the same scale */
158 size = input_abs_get_min(dev, ABS_X) - 158 size = input_abs_get_max(dev, ABS_X) -
159 input_abs_get_max(dev, ABS_X); 159 input_abs_get_min(dev, ABS_X);
160 if (size == 0) 160 if (size == 0)
161 size = 256 * 2; 161 size = 256 * 2;
162 162
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 46e4ba0b9246..f58513160480 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1485,8 +1485,8 @@ static int __init i8042_init(void)
1485 1485
1486static void __exit i8042_exit(void) 1486static void __exit i8042_exit(void)
1487{ 1487{
1488 platform_driver_unregister(&i8042_driver);
1489 platform_device_unregister(i8042_platform_device); 1488 platform_device_unregister(i8042_platform_device);
1489 platform_driver_unregister(&i8042_driver);
1490 i8042_platform_exit(); 1490 i8042_platform_exit();
1491 1491
1492 panic_blink = NULL; 1492 panic_blink = NULL;
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 42ba3691d908..b35876ee6908 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -103,27 +103,26 @@ static void wacom_sys_irq(struct urb *urb)
103static int wacom_open(struct input_dev *dev) 103static int wacom_open(struct input_dev *dev)
104{ 104{
105 struct wacom *wacom = input_get_drvdata(dev); 105 struct wacom *wacom = input_get_drvdata(dev);
106 int retval = 0;
106 107
107 mutex_lock(&wacom->lock); 108 if (usb_autopm_get_interface(wacom->intf) < 0)
108
109 wacom->irq->dev = wacom->usbdev;
110
111 if (usb_autopm_get_interface(wacom->intf) < 0) {
112 mutex_unlock(&wacom->lock);
113 return -EIO; 109 return -EIO;
114 } 110
111 mutex_lock(&wacom->lock);
115 112
116 if (usb_submit_urb(wacom->irq, GFP_KERNEL)) { 113 if (usb_submit_urb(wacom->irq, GFP_KERNEL)) {
117 usb_autopm_put_interface(wacom->intf); 114 retval = -EIO;
118 mutex_unlock(&wacom->lock); 115 goto out;
119 return -EIO;
120 } 116 }
121 117
122 wacom->open = true; 118 wacom->open = true;
123 wacom->intf->needs_remote_wakeup = 1; 119 wacom->intf->needs_remote_wakeup = 1;
124 120
121out:
125 mutex_unlock(&wacom->lock); 122 mutex_unlock(&wacom->lock);
126 return 0; 123 if (retval)
124 usb_autopm_put_interface(wacom->intf);
125 return retval;
127} 126}
128 127
129static void wacom_close(struct input_dev *dev) 128static void wacom_close(struct input_dev *dev)
@@ -135,6 +134,8 @@ static void wacom_close(struct input_dev *dev)
135 wacom->open = false; 134 wacom->open = false;
136 wacom->intf->needs_remote_wakeup = 0; 135 wacom->intf->needs_remote_wakeup = 0;
137 mutex_unlock(&wacom->lock); 136 mutex_unlock(&wacom->lock);
137
138 usb_autopm_put_interface(wacom->intf);
138} 139}
139 140
140static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc, 141static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
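The reordering above follows a common open() pattern: take the autopm reference outside the mutex, keep a single unlock path, and drop the reference only on failure. A condensed sketch under those assumptions (example_open() is illustrative and omits the needs_remote_wakeup bookkeeping):

	static int example_open(struct wacom *wacom)
	{
		int retval = 0;

		if (usb_autopm_get_interface(wacom->intf) < 0)
			return -EIO;		/* resume the interface first */

		mutex_lock(&wacom->lock);
		if (usb_submit_urb(wacom->irq, GFP_KERNEL))
			retval = -EIO;		/* fall through to the single unlock */
		else
			wacom->open = true;
		mutex_unlock(&wacom->lock);

		if (retval)
			usb_autopm_put_interface(wacom->intf);
		return retval;
	}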
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 40d77ba8fdc1..47fd7a041c52 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
243 if (features->type == WACOM_G4 || 243 if (features->type == WACOM_G4 ||
244 features->type == WACOM_MO) { 244 features->type == WACOM_MO) {
245 input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f); 245 input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
246 rw = (signed)(data[7] & 0x04) - (data[7] & 0x03); 246 rw = (data[7] & 0x04) - (data[7] & 0x03);
247 } else { 247 } else {
248 input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f); 248 input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
249 rw = -(signed)data[6]; 249 rw = -(signed char)data[6];
250 } 250 }
251 input_report_rel(input, REL_WHEEL, rw); 251 input_report_rel(input, REL_WHEEL, rw);
252 } 252 }
@@ -442,8 +442,10 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
442 /* general pen packet */ 442 /* general pen packet */
443 if ((data[1] & 0xb8) == 0xa0) { 443 if ((data[1] & 0xb8) == 0xa0) {
444 t = (data[6] << 2) | ((data[7] >> 6) & 3); 444 t = (data[6] << 2) | ((data[7] >> 6) & 3);
445 if (features->type >= INTUOS4S && features->type <= INTUOS4L) 445 if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
446 features->type == WACOM_21UX2) {
446 t = (t << 1) | (data[1] & 1); 447 t = (t << 1) | (data[1] & 1);
448 }
447 input_report_abs(input, ABS_PRESSURE, t); 449 input_report_abs(input, ABS_PRESSURE, t);
448 input_report_abs(input, ABS_TILT_X, 450 input_report_abs(input, ABS_TILT_X,
449 ((data[7] << 1) & 0x7e) | (data[8] >> 7)); 451 ((data[7] << 1) & 0x7e) | (data[8] >> 7));
diff --git a/drivers/isdn/hardware/avm/Kconfig b/drivers/isdn/hardware/avm/Kconfig
index 5dbcbe3a54a6..b99b906ea9b1 100644
--- a/drivers/isdn/hardware/avm/Kconfig
+++ b/drivers/isdn/hardware/avm/Kconfig
@@ -36,12 +36,13 @@ config ISDN_DRV_AVMB1_T1ISA
36 36
37config ISDN_DRV_AVMB1_B1PCMCIA 37config ISDN_DRV_AVMB1_B1PCMCIA
38 tristate "AVM B1/M1/M2 PCMCIA support" 38 tristate "AVM B1/M1/M2 PCMCIA support"
39 depends on PCMCIA
39 help 40 help
40 Enable support for the PCMCIA version of the AVM B1 card. 41 Enable support for the PCMCIA version of the AVM B1 card.
41 42
42config ISDN_DRV_AVMB1_AVM_CS 43config ISDN_DRV_AVMB1_AVM_CS
43 tristate "AVM B1/M1/M2 PCMCIA cs module" 44 tristate "AVM B1/M1/M2 PCMCIA cs module"
44 depends on ISDN_DRV_AVMB1_B1PCMCIA && PCMCIA 45 depends on ISDN_DRV_AVMB1_B1PCMCIA
45 help 46 help
46 Enable the PCMCIA client driver for the AVM B1/M1/M2 47 Enable the PCMCIA client driver for the AVM B1/M1/M2
47 PCMCIA cards. 48 PCMCIA cards.
diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
index 485be8b1e1b3..f0225bc0f267 100644
--- a/drivers/isdn/sc/interrupt.c
+++ b/drivers/isdn/sc/interrupt.c
@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
112 } 112 }
113 else if(callid>=0x0000 && callid<=0x7FFF) 113 else if(callid>=0x0000 && callid<=0x7FFF)
114 { 114 {
115 int len;
116
115 pr_debug("%s: Got Incoming Call\n", 117 pr_debug("%s: Got Incoming Call\n",
116 sc_adapter[card]->devicename); 118 sc_adapter[card]->devicename);
117 strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4])); 119 len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
118 strcpy(setup.eazmsn, 120 sizeof(setup.phone));
119 sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn); 121 if (len >= sizeof(setup.phone))
122 continue;
123 len = strlcpy(setup.eazmsn,
124 sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
125 sizeof(setup.eazmsn));
126 if (len >= sizeof(setup.eazmsn))
127 continue;
120 setup.si1 = 7; 128 setup.si1 = 7;
121 setup.si2 = 0; 129 setup.si2 = 0;
122 setup.plan = 0; 130 setup.plan = 0;
@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
176 * Handle a GetMyNumber Rsp 184 * Handle a GetMyNumber Rsp
177 */ 185 */
178 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){ 186 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
179 strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array); 187 strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
188 rcvmsg.msg_data.byte_array,
189 sizeof(rcvmsg.msg_data.byte_array));
180 continue; 190 continue;
181 } 191 }
182 192
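The conversion above relies on strlcpy() returning the full length of the source string, which makes truncation detectable. A minimal sketch of that check (copy_number() is a hypothetical helper):

	static int copy_number(char *dst, size_t dst_size, const char *src)
	{
		size_t len = strlcpy(dst, src, dst_size);

		/* len counts the whole source; >= dst_size means it did not fit
		 * and dst holds a truncated (but NUL-terminated) copy. */
		if (len >= dst_size)
			return -EINVAL;
		return 0;
	}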
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 74dce4ba0262..350eb34f049c 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -81,7 +81,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
81 int cmd_level; 81 int cmd_level;
82 int slow_level; 82 int slow_level;
83 83
84 read_lock(&led_dat->rw_lock); 84 read_lock_irq(&led_dat->rw_lock);
85 85
86 cmd_level = gpio_get_value(led_dat->cmd); 86 cmd_level = gpio_get_value(led_dat->cmd);
87 slow_level = gpio_get_value(led_dat->slow); 87 slow_level = gpio_get_value(led_dat->slow);
@@ -95,7 +95,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
95 } 95 }
96 } 96 }
97 97
98 read_unlock(&led_dat->rw_lock); 98 read_unlock_irq(&led_dat->rw_lock);
99 99
100 return ret; 100 return ret;
101} 101}
@@ -104,8 +104,9 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
104 enum ns2_led_modes mode) 104 enum ns2_led_modes mode)
105{ 105{
106 int i; 106 int i;
107 unsigned long flags;
107 108
108 write_lock(&led_dat->rw_lock); 109 write_lock_irqsave(&led_dat->rw_lock, flags);
109 110
110 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) { 111 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
111 if (mode == ns2_led_modval[i].mode) { 112 if (mode == ns2_led_modval[i].mode) {
@@ -116,7 +117,7 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
116 } 117 }
117 } 118 }
118 119
119 write_unlock(&led_dat->rw_lock); 120 write_unlock_irqrestore(&led_dat->rw_lock, flags);
120} 121}
121 122
122static void ns2_led_set(struct led_classdev *led_cdev, 123static void ns2_led_set(struct led_classdev *led_cdev,
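The switch above moves the rwlock users to IRQ-aware variants: the readers, which run with interrupts enabled, take the _irq forms, while the writer saves and restores the interrupt state because it can be reached from contexts where IRQs may already be off. A small sketch of the writer side (example_set() and its GPIO writes are illustrative, not the driver's exact body):

	static void example_set(struct ns2_led_data *led_dat, int cmd, int slow)
	{
		unsigned long flags;

		write_lock_irqsave(&led_dat->rw_lock, flags);
		gpio_set_value(led_dat->cmd, cmd);
		gpio_set_value(led_dat->slow, slow);
		write_unlock_irqrestore(&led_dat->rw_lock, flags);
	}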
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 35bc2737412f..2d17e76066bd 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -45,6 +45,7 @@
45#include <linux/syscalls.h> 45#include <linux/syscalls.h>
46#include <linux/suspend.h> 46#include <linux/suspend.h>
47#include <linux/cpu.h> 47#include <linux/cpu.h>
48#include <linux/compat.h>
48#include <asm/prom.h> 49#include <asm/prom.h>
49#include <asm/machdep.h> 50#include <asm/machdep.h>
50#include <asm/io.h> 51#include <asm/io.h>
@@ -2349,11 +2350,52 @@ static long pmu_unlocked_ioctl(struct file *filp,
2349 return ret; 2350 return ret;
2350} 2351}
2351 2352
2353#ifdef CONFIG_COMPAT
2354#define PMU_IOC_GET_BACKLIGHT32 _IOR('B', 1, compat_size_t)
2355#define PMU_IOC_SET_BACKLIGHT32 _IOW('B', 2, compat_size_t)
2356#define PMU_IOC_GET_MODEL32 _IOR('B', 3, compat_size_t)
2357#define PMU_IOC_HAS_ADB32 _IOR('B', 4, compat_size_t)
2358#define PMU_IOC_CAN_SLEEP32 _IOR('B', 5, compat_size_t)
2359#define PMU_IOC_GRAB_BACKLIGHT32 _IOR('B', 6, compat_size_t)
2360
2361static long compat_pmu_ioctl (struct file *filp, u_int cmd, u_long arg)
2362{
2363 switch (cmd) {
2364 case PMU_IOC_SLEEP:
2365 break;
2366 case PMU_IOC_GET_BACKLIGHT32:
2367 cmd = PMU_IOC_GET_BACKLIGHT;
2368 break;
2369 case PMU_IOC_SET_BACKLIGHT32:
2370 cmd = PMU_IOC_SET_BACKLIGHT;
2371 break;
2372 case PMU_IOC_GET_MODEL32:
2373 cmd = PMU_IOC_GET_MODEL;
2374 break;
2375 case PMU_IOC_HAS_ADB32:
2376 cmd = PMU_IOC_HAS_ADB;
2377 break;
2378 case PMU_IOC_CAN_SLEEP32:
2379 cmd = PMU_IOC_CAN_SLEEP;
2380 break;
2381 case PMU_IOC_GRAB_BACKLIGHT32:
2382 cmd = PMU_IOC_GRAB_BACKLIGHT;
2383 break;
2384 default:
2385 return -ENOIOCTLCMD;
2386 }
2387 return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
2388}
2389#endif
2390
2352static const struct file_operations pmu_device_fops = { 2391static const struct file_operations pmu_device_fops = {
2353 .read = pmu_read, 2392 .read = pmu_read,
2354 .write = pmu_write, 2393 .write = pmu_write,
2355 .poll = pmu_fpoll, 2394 .poll = pmu_fpoll,
2356 .unlocked_ioctl = pmu_unlocked_ioctl, 2395 .unlocked_ioctl = pmu_unlocked_ioctl,
2396#ifdef CONFIG_COMPAT
2397 .compat_ioctl = compat_pmu_ioctl,
2398#endif
2357 .open = pmu_open, 2399 .open = pmu_open,
2358 .release = pmu_release, 2400 .release = pmu_release,
2359}; 2401};
diff --git a/drivers/md/.gitignore b/drivers/md/.gitignore
deleted file mode 100644
index a7afec6b19c6..000000000000
--- a/drivers/md/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
1mktables
2raid6altivec*.c
3raid6int*.c
4raid6tables.c
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 1ba1e122e948..e4fb58db5454 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1000,10 +1000,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1000 page = bitmap->sb_page; 1000 page = bitmap->sb_page;
1001 offset = sizeof(bitmap_super_t); 1001 offset = sizeof(bitmap_super_t);
1002 if (!file) 1002 if (!file)
1003 read_sb_page(bitmap->mddev, 1003 page = read_sb_page(
1004 bitmap->mddev->bitmap_info.offset, 1004 bitmap->mddev,
1005 page, 1005 bitmap->mddev->bitmap_info.offset,
1006 index, count); 1006 page,
1007 index, count);
1007 } else if (file) { 1008 } else if (file) {
1008 page = read_page(file, index, bitmap, count); 1009 page = read_page(file, index, bitmap, count);
1009 offset = 0; 1010 offset = 0;
@@ -1542,8 +1543,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
1542 atomic_read(&bitmap->mddev->recovery_active) == 0); 1543 atomic_read(&bitmap->mddev->recovery_active) == 0);
1543 1544
1544 bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync; 1545 bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
1545 if (bitmap->mddev->persistent) 1546 set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
1546 set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
1547 sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1); 1547 sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
1548 s = 0; 1548 s = 0;
1549 while (s < sector && s < bitmap->mddev->resync_max_sectors) { 1549 while (s < sector && s < bitmap->mddev->resync_max_sectors) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3640f025cb72..ed075d19db37 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1578,7 +1578,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1578 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1578 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1579 if (rdev->sb_size & bmask) 1579 if (rdev->sb_size & bmask)
1580 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1580 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1581 } 1581 } else
1582 max_dev = le32_to_cpu(sb->max_dev);
1583
1582 for (i=0; i<max_dev;i++) 1584 for (i=0; i<max_dev;i++)
1583 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1585 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1584 1586
@@ -2102,9 +2104,9 @@ repeat:
2102 rdev->recovery_offset = mddev->curr_resync_completed; 2104 rdev->recovery_offset = mddev->curr_resync_completed;
2103 2105
2104 } 2106 }
2105 if (mddev->external || !mddev->persistent) { 2107 if (!mddev->persistent) {
2106 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2107 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); 2108 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2109 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2108 wake_up(&mddev->sb_wait); 2110 wake_up(&mddev->sb_wait);
2109 return; 2111 return;
2110 } 2112 }
@@ -2113,7 +2115,6 @@ repeat:
2113 2115
2114 mddev->utime = get_seconds(); 2116 mddev->utime = get_seconds();
2115 2117
2116 set_bit(MD_CHANGE_PENDING, &mddev->flags);
2117 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) 2118 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2118 force_change = 1; 2119 force_change = 1;
2119 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) 2120 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
@@ -3306,7 +3307,7 @@ array_state_show(mddev_t *mddev, char *page)
3306 case 0: 3307 case 0:
3307 if (mddev->in_sync) 3308 if (mddev->in_sync)
3308 st = clean; 3309 st = clean;
3309 else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) 3310 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3310 st = write_pending; 3311 st = write_pending;
3311 else if (mddev->safemode) 3312 else if (mddev->safemode)
3312 st = active_idle; 3313 st = active_idle;
@@ -3387,9 +3388,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
3387 mddev->in_sync = 1; 3388 mddev->in_sync = 1;
3388 if (mddev->safemode == 1) 3389 if (mddev->safemode == 1)
3389 mddev->safemode = 0; 3390 mddev->safemode = 0;
3390 if (mddev->persistent) 3391 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3391 set_bit(MD_CHANGE_CLEAN,
3392 &mddev->flags);
3393 } 3392 }
3394 err = 0; 3393 err = 0;
3395 } else 3394 } else
@@ -3401,8 +3400,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
3401 case active: 3400 case active:
3402 if (mddev->pers) { 3401 if (mddev->pers) {
3403 restart_array(mddev); 3402 restart_array(mddev);
3404 if (mddev->external) 3403 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3405 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
3406 wake_up(&mddev->sb_wait); 3404 wake_up(&mddev->sb_wait);
3407 err = 0; 3405 err = 0;
3408 } else { 3406 } else {
@@ -6505,6 +6503,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
6505 if (mddev->in_sync) { 6503 if (mddev->in_sync) {
6506 mddev->in_sync = 0; 6504 mddev->in_sync = 0;
6507 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6505 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6506 set_bit(MD_CHANGE_PENDING, &mddev->flags);
6508 md_wakeup_thread(mddev->thread); 6507 md_wakeup_thread(mddev->thread);
6509 did_change = 1; 6508 did_change = 1;
6510 } 6509 }
@@ -6513,7 +6512,6 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
6513 if (did_change) 6512 if (did_change)
6514 sysfs_notify_dirent_safe(mddev->sysfs_state); 6513 sysfs_notify_dirent_safe(mddev->sysfs_state);
6515 wait_event(mddev->sb_wait, 6514 wait_event(mddev->sb_wait,
6516 !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
6517 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 6515 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6518} 6516}
6519 6517
@@ -6549,6 +6547,7 @@ int md_allow_write(mddev_t *mddev)
6549 if (mddev->in_sync) { 6547 if (mddev->in_sync) {
6550 mddev->in_sync = 0; 6548 mddev->in_sync = 0;
6551 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6549 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6550 set_bit(MD_CHANGE_PENDING, &mddev->flags);
6552 if (mddev->safemode_delay && 6551 if (mddev->safemode_delay &&
6553 mddev->safemode == 0) 6552 mddev->safemode == 0)
6554 mddev->safemode = 1; 6553 mddev->safemode = 1;
@@ -6558,7 +6557,7 @@ int md_allow_write(mddev_t *mddev)
6558 } else 6557 } else
6559 spin_unlock_irq(&mddev->write_lock); 6558 spin_unlock_irq(&mddev->write_lock);
6560 6559
6561 if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) 6560 if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
6562 return -EAGAIN; 6561 return -EAGAIN;
6563 else 6562 else
6564 return 0; 6563 return 0;
@@ -6756,8 +6755,7 @@ void md_do_sync(mddev_t *mddev)
6756 atomic_read(&mddev->recovery_active) == 0); 6755 atomic_read(&mddev->recovery_active) == 0);
6757 mddev->curr_resync_completed = 6756 mddev->curr_resync_completed =
6758 mddev->curr_resync; 6757 mddev->curr_resync;
6759 if (mddev->persistent) 6758 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6760 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6761 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6759 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6762 } 6760 }
6763 6761
@@ -7006,7 +7004,7 @@ void md_check_recovery(mddev_t *mddev)
7006 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 7004 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
7007 return; 7005 return;
7008 if ( ! ( 7006 if ( ! (
7009 (mddev->flags && !mddev->external) || 7007 (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
7010 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 7008 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7011 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 7009 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
7012 (mddev->external == 0 && mddev->safemode == 1) || 7010 (mddev->external == 0 && mddev->safemode == 1) ||
@@ -7036,8 +7034,7 @@ void md_check_recovery(mddev_t *mddev)
7036 mddev->recovery_cp == MaxSector) { 7034 mddev->recovery_cp == MaxSector) {
7037 mddev->in_sync = 1; 7035 mddev->in_sync = 1;
7038 did_change = 1; 7036 did_change = 1;
7039 if (mddev->persistent) 7037 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7040 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7041 } 7038 }
7042 if (mddev->safemode == 1) 7039 if (mddev->safemode == 1)
7043 mddev->safemode = 0; 7040 mddev->safemode = 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d8e2ab25103b..112a2c32db0c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -139,7 +139,7 @@ struct mddev_s
139 unsigned long flags; 139 unsigned long flags;
140#define MD_CHANGE_DEVS 0 /* Some device status has changed */ 140#define MD_CHANGE_DEVS 0 /* Some device status has changed */
141#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ 141#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
142#define MD_CHANGE_PENDING 2 /* superblock update in progress */ 142#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
143 143
144 int suspended; 144 int suspended;
145 atomic_t active_io; 145 atomic_t active_io;
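With the comment change above, MD_CHANGE_PENDING now marks only the clean-to-active transition, and writers block on it rather than on MD_CHANGE_CLEAN. A condensed sketch of the handshake as it appears in the md.c hunks above (not verbatim kernel code):

	static void example_write_start(mddev_t *mddev)
	{
		/* Leaving 'clean': record the transition and the fact that the
		 * on-disk superblock has not caught up yet. */
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		set_bit(MD_CHANGE_PENDING, &mddev->flags);
		md_wakeup_thread(mddev->thread);

		/* Proceed only once md_update_sb() has cleared the pending bit. */
		wait_event(mddev->sb_wait,
			   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
	}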
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 886a9d865488..378a25894c57 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1781,7 +1781,9 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1781 1781
1782 /* take from bio_init */ 1782 /* take from bio_init */
1783 bio->bi_next = NULL; 1783 bio->bi_next = NULL;
1784 bio->bi_flags &= ~(BIO_POOL_MASK-1);
1784 bio->bi_flags |= 1 << BIO_UPTODATE; 1785 bio->bi_flags |= 1 << BIO_UPTODATE;
1786 bio->bi_comp_cpu = -1;
1785 bio->bi_rw = READ; 1787 bio->bi_rw = READ;
1786 bio->bi_vcnt = 0; 1788 bio->bi_vcnt = 0;
1787 bio->bi_idx = 0; 1789 bio->bi_idx = 0;
@@ -1854,7 +1856,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1854 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 1856 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
1855 break; 1857 break;
1856 BUG_ON(sync_blocks < (PAGE_SIZE>>9)); 1858 BUG_ON(sync_blocks < (PAGE_SIZE>>9));
1857 if (len > (sync_blocks<<9)) 1859 if ((len >> 9) > sync_blocks)
1858 len = sync_blocks<<9; 1860 len = sync_blocks<<9;
1859 } 1861 }
1860 1862
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index 7e82a9df726b..7961d59f5cac 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -319,7 +319,7 @@ static void ir_timer_keyup(unsigned long cookie)
319 * a keyup event might follow immediately after the keydown. 319 * a keyup event might follow immediately after the keydown.
320 */ 320 */
321 spin_lock_irqsave(&ir->keylock, flags); 321 spin_lock_irqsave(&ir->keylock, flags);
322 if (time_is_after_eq_jiffies(ir->keyup_jiffies)) 322 if (time_is_before_eq_jiffies(ir->keyup_jiffies))
323 ir_keyup(ir); 323 ir_keyup(ir);
324 spin_unlock_irqrestore(&ir->keylock, flags); 324 spin_unlock_irqrestore(&ir->keylock, flags);
325} 325}
@@ -510,6 +510,13 @@ int __ir_input_register(struct input_dev *input_dev,
510 (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW) ? 510 (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW) ?
511 " in raw mode" : ""); 511 " in raw mode" : "");
512 512
513 /*
 514 * Default delay of 250ms is too short for some protocols, especially
515 * since the timeout is currently set to 250ms. Increase it to 500ms,
516 * to avoid wrong repetition of the keycodes.
517 */
518 input_dev->rep[REP_DELAY] = 500;
519
513 return 0; 520 return 0;
514 521
515out_event: 522out_event:
diff --git a/drivers/media/IR/ir-lirc-codec.c b/drivers/media/IR/ir-lirc-codec.c
index 77b5946413c0..e63f757d5d72 100644
--- a/drivers/media/IR/ir-lirc-codec.c
+++ b/drivers/media/IR/ir-lirc-codec.c
@@ -267,7 +267,7 @@ static int ir_lirc_register(struct input_dev *input_dev)
267 features |= LIRC_CAN_SET_SEND_CARRIER; 267 features |= LIRC_CAN_SET_SEND_CARRIER;
268 268
269 if (ir_dev->props->s_tx_duty_cycle) 269 if (ir_dev->props->s_tx_duty_cycle)
270 features |= LIRC_CAN_SET_REC_DUTY_CYCLE; 270 features |= LIRC_CAN_SET_SEND_DUTY_CYCLE;
271 } 271 }
272 272
273 if (ir_dev->props->s_rx_carrier_range) 273 if (ir_dev->props->s_rx_carrier_range)
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c
index 43094e7eccfa..8e0e1b1f8c87 100644
--- a/drivers/media/IR/ir-raw-event.c
+++ b/drivers/media/IR/ir-raw-event.c
@@ -279,9 +279,11 @@ int ir_raw_event_register(struct input_dev *input_dev)
279 "rc%u", (unsigned int)ir->devno); 279 "rc%u", (unsigned int)ir->devno);
280 280
281 if (IS_ERR(ir->raw->thread)) { 281 if (IS_ERR(ir->raw->thread)) {
282 int ret = PTR_ERR(ir->raw->thread);
283
282 kfree(ir->raw); 284 kfree(ir->raw);
283 ir->raw = NULL; 285 ir->raw = NULL;
284 return PTR_ERR(ir->raw->thread); 286 return ret;
285 } 287 }
286 288
287 mutex_lock(&ir_raw_handler_lock); 289 mutex_lock(&ir_raw_handler_lock);
diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c
index 96dafc425c8e..46d42467f9b4 100644
--- a/drivers/media/IR/ir-sysfs.c
+++ b/drivers/media/IR/ir-sysfs.c
@@ -67,13 +67,14 @@ static ssize_t show_protocols(struct device *d,
67 char *tmp = buf; 67 char *tmp = buf;
68 int i; 68 int i;
69 69
70 if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) { 70 if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
71 enabled = ir_dev->rc_tab.ir_type; 71 enabled = ir_dev->rc_tab.ir_type;
72 allowed = ir_dev->props->allowed_protos; 72 allowed = ir_dev->props->allowed_protos;
73 } else { 73 } else if (ir_dev->raw) {
74 enabled = ir_dev->raw->enabled_protocols; 74 enabled = ir_dev->raw->enabled_protocols;
75 allowed = ir_raw_get_allowed_protocols(); 75 allowed = ir_raw_get_allowed_protocols();
76 } 76 } else
77 return sprintf(tmp, "[builtin]\n");
77 78
78 IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n", 79 IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
79 (long long)allowed, 80 (long long)allowed,
@@ -121,10 +122,14 @@ static ssize_t store_protocols(struct device *d,
121 int rc, i, count = 0; 122 int rc, i, count = 0;
122 unsigned long flags; 123 unsigned long flags;
123 124
124 if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) 125 if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
125 type = ir_dev->rc_tab.ir_type; 126 type = ir_dev->rc_tab.ir_type;
126 else 127 else if (ir_dev->raw)
127 type = ir_dev->raw->enabled_protocols; 128 type = ir_dev->raw->enabled_protocols;
129 else {
130 IR_dprintk(1, "Protocol switching not supported\n");
131 return -EINVAL;
132 }
128 133
129 while ((tmp = strsep((char **) &data, " \n")) != NULL) { 134 while ((tmp = strsep((char **) &data, " \n")) != NULL) {
130 if (!*tmp) 135 if (!*tmp)
@@ -185,7 +190,7 @@ static ssize_t store_protocols(struct device *d,
185 } 190 }
186 } 191 }
187 192
188 if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) { 193 if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) {
189 spin_lock_irqsave(&ir_dev->rc_tab.lock, flags); 194 spin_lock_irqsave(&ir_dev->rc_tab.lock, flags);
190 ir_dev->rc_tab.ir_type = type; 195 ir_dev->rc_tab.ir_type = type;
191 spin_unlock_irqrestore(&ir_dev->rc_tab.lock, flags); 196 spin_unlock_irqrestore(&ir_dev->rc_tab.lock, flags);
diff --git a/drivers/media/IR/keymaps/rc-rc6-mce.c b/drivers/media/IR/keymaps/rc-rc6-mce.c
index 64264f7f838f..39557ad401b6 100644
--- a/drivers/media/IR/keymaps/rc-rc6-mce.c
+++ b/drivers/media/IR/keymaps/rc-rc6-mce.c
@@ -19,6 +19,7 @@ static struct ir_scancode rc6_mce[] = {
19 19
20 { 0x800f0416, KEY_PLAY }, 20 { 0x800f0416, KEY_PLAY },
21 { 0x800f0418, KEY_PAUSE }, 21 { 0x800f0418, KEY_PAUSE },
22 { 0x800f046e, KEY_PLAYPAUSE },
22 { 0x800f0419, KEY_STOP }, 23 { 0x800f0419, KEY_STOP },
23 { 0x800f0417, KEY_RECORD }, 24 { 0x800f0417, KEY_RECORD },
24 25
@@ -37,6 +38,8 @@ static struct ir_scancode rc6_mce[] = {
37 { 0x800f0411, KEY_VOLUMEDOWN }, 38 { 0x800f0411, KEY_VOLUMEDOWN },
38 { 0x800f0412, KEY_CHANNELUP }, 39 { 0x800f0412, KEY_CHANNELUP },
39 { 0x800f0413, KEY_CHANNELDOWN }, 40 { 0x800f0413, KEY_CHANNELDOWN },
41 { 0x800f043a, KEY_BRIGHTNESSUP },
42 { 0x800f0480, KEY_BRIGHTNESSDOWN },
40 43
41 { 0x800f0401, KEY_NUMERIC_1 }, 44 { 0x800f0401, KEY_NUMERIC_1 },
42 { 0x800f0402, KEY_NUMERIC_2 }, 45 { 0x800f0402, KEY_NUMERIC_2 },
diff --git a/drivers/media/IR/mceusb.c b/drivers/media/IR/mceusb.c
index ac6bb2c01a48..bc620e10ef77 100644
--- a/drivers/media/IR/mceusb.c
+++ b/drivers/media/IR/mceusb.c
@@ -120,6 +120,10 @@ static struct usb_device_id mceusb_dev_table[] = {
120 { USB_DEVICE(VENDOR_PHILIPS, 0x0613) }, 120 { USB_DEVICE(VENDOR_PHILIPS, 0x0613) },
121 /* Philips eHome Infrared Transceiver */ 121 /* Philips eHome Infrared Transceiver */
122 { USB_DEVICE(VENDOR_PHILIPS, 0x0815) }, 122 { USB_DEVICE(VENDOR_PHILIPS, 0x0815) },
123 /* Philips/Spinel plus IR transceiver for ASUS */
124 { USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
125 /* Philips/Spinel plus IR transceiver for ASUS */
126 { USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
123 /* Realtek MCE IR Receiver */ 127 /* Realtek MCE IR Receiver */
124 { USB_DEVICE(VENDOR_REALTEK, 0x0161) }, 128 { USB_DEVICE(VENDOR_REALTEK, 0x0161) },
125 /* SMK/Toshiba G83C0004D410 */ 129 /* SMK/Toshiba G83C0004D410 */
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index fe818348b8a3..48397f103d32 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -673,9 +673,6 @@ static int dib0700_probe(struct usb_interface *intf,
673 else 673 else
674 dev->props.rc.core.bulk_mode = false; 674 dev->props.rc.core.bulk_mode = false;
675 675
676 /* Need a higher delay, to avoid wrong repeat */
677 dev->rc_input_dev->rep[REP_DELAY] = 500;
678
679 dib0700_rc_setup(dev); 676 dib0700_rc_setup(dev);
680 677
681 return 0; 678 return 0;
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index f634d2e784b2..e06acd1fecb6 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -940,6 +940,58 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap)
940 return adap->fe == NULL ? -ENODEV : 0; 940 return adap->fe == NULL ? -ENODEV : 0;
941} 941}
942 942
943/* STK7770P */
944static struct dib7000p_config dib7770p_dib7000p_config = {
945 .output_mpeg2_in_188_bytes = 1,
946
947 .agc_config_count = 1,
948 .agc = &dib7070_agc_config,
949 .bw = &dib7070_bw_config_12_mhz,
950 .tuner_is_baseband = 1,
951 .spur_protect = 1,
952
953 .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS,
954 .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES,
955 .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS,
956
957 .hostbus_diversity = 1,
958 .enable_current_mirror = 1,
959 .disable_sample_and_hold = 0,
960};
961
962static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap)
963{
964 struct usb_device_descriptor *p = &adap->dev->udev->descriptor;
965 if (p->idVendor == cpu_to_le16(USB_VID_PINNACLE) &&
966 p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E))
967 dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0);
968 else
969 dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
970 msleep(10);
971 dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
972 dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
973 dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
974 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
975
976 dib0700_ctrl_clock(adap->dev, 72, 1);
977
978 msleep(10);
979 dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
980 msleep(10);
981 dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
982
983 if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
984 &dib7770p_dib7000p_config) != 0) {
985 err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n",
986 __func__);
987 return -ENODEV;
988 }
989
990 adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
991 &dib7770p_dib7000p_config);
992 return adap->fe == NULL ? -ENODEV : 0;
993}
994
943/* DIB807x generic */ 995/* DIB807x generic */
944static struct dibx000_agc_config dib807x_agc_config[2] = { 996static struct dibx000_agc_config dib807x_agc_config[2] = {
945 { 997 {
@@ -1781,7 +1833,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
1781/* 60 */{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS_2) }, 1833/* 60 */{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS_2) },
1782 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XPVR) }, 1834 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XPVR) },
1783 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XP) }, 1835 { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XP) },
1784 { USB_DEVICE(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD) }, 1836 { USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x000, 0x3f00) },
1785 { USB_DEVICE(USB_VID_EVOLUTEPC, USB_PID_TVWAY_PLUS) }, 1837 { USB_DEVICE(USB_VID_EVOLUTEPC, USB_PID_TVWAY_PLUS) },
1786/* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) }, 1838/* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) },
1787 { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) }, 1839 { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) },
@@ -2406,7 +2458,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
2406 .pid_filter_count = 32, 2458 .pid_filter_count = 32,
2407 .pid_filter = stk70x0p_pid_filter, 2459 .pid_filter = stk70x0p_pid_filter,
2408 .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, 2460 .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
2409 .frontend_attach = stk7070p_frontend_attach, 2461 .frontend_attach = stk7770p_frontend_attach,
2410 .tuner_attach = dib7770p_tuner_attach, 2462 .tuner_attach = dib7770p_tuner_attach,
2411 2463
2412 DIB0700_DEFAULT_STREAMING_CONFIG(0x02), 2464 DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c
index 6b22ec64ab0c..f896337b4535 100644
--- a/drivers/media/dvb/dvb-usb/opera1.c
+++ b/drivers/media/dvb/dvb-usb/opera1.c
@@ -483,9 +483,7 @@ static int opera1_xilinx_load_firmware(struct usb_device *dev,
483 } 483 }
484 } 484 }
485 kfree(p); 485 kfree(p);
486 if (fw) { 486 release_firmware(fw);
487 release_firmware(fw);
488 }
489 return ret; 487 return ret;
490} 488}
491 489
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 2e28b973dfd3..3aed0d433921 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -260,6 +260,9 @@ static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_ad
260 260
261// dprintk( "908: %x, 909: %x\n", reg_908, reg_909); 261// dprintk( "908: %x, 909: %x\n", reg_908, reg_909);
262 262
263 reg_909 |= (state->cfg.disable_sample_and_hold & 1) << 4;
264 reg_908 |= (state->cfg.enable_current_mirror & 1) << 7;
265
263 dib7000p_write_word(state, 908, reg_908); 266 dib7000p_write_word(state, 908, reg_908);
264 dib7000p_write_word(state, 909, reg_909); 267 dib7000p_write_word(state, 909, reg_909);
265} 268}
@@ -778,7 +781,10 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte
778 default: 781 default:
779 case GUARD_INTERVAL_1_32: value *= 1; break; 782 case GUARD_INTERVAL_1_32: value *= 1; break;
780 } 783 }
781 state->div_sync_wait = (value * 3) / 2 + 32; // add 50% SFN margin + compensate for one DVSY-fifo TODO 784 if (state->cfg.diversity_delay == 0)
785 state->div_sync_wait = (value * 3) / 2 + 48; // add 50% SFN margin + compensate for one DVSY-fifo
786 else
787 state->div_sync_wait = (value * 3) / 2 + state->cfg.diversity_delay; // add 50% SFN margin + compensate for one DVSY-fifo
782 788
 783 /* deactivate the possibility of diversity reception if extended interleaver */ 789 /* deactivate the possibility of diversity reception if extended interleaver */
784 state->div_force_off = !1 && ch->u.ofdm.transmission_mode != TRANSMISSION_MODE_8K; 790 state->div_force_off = !1 && ch->u.ofdm.transmission_mode != TRANSMISSION_MODE_8K;
diff --git a/drivers/media/dvb/frontends/dib7000p.h b/drivers/media/dvb/frontends/dib7000p.h
index 805dd13a97ee..da17345bf5bd 100644
--- a/drivers/media/dvb/frontends/dib7000p.h
+++ b/drivers/media/dvb/frontends/dib7000p.h
@@ -33,6 +33,11 @@ struct dib7000p_config {
33 int (*agc_control) (struct dvb_frontend *, u8 before); 33 int (*agc_control) (struct dvb_frontend *, u8 before);
34 34
35 u8 output_mode; 35 u8 output_mode;
36 u8 disable_sample_and_hold : 1;
37
38 u8 enable_current_mirror : 1;
39 u8 diversity_delay;
40
36}; 41};
37 42
38#define DEFAULT_DIB7000P_I2C_ADDRESS 18 43#define DEFAULT_DIB7000P_I2C_ADDRESS 18
diff --git a/drivers/media/dvb/mantis/Kconfig b/drivers/media/dvb/mantis/Kconfig
index decdeda840d0..fd0830ed10d8 100644
--- a/drivers/media/dvb/mantis/Kconfig
+++ b/drivers/media/dvb/mantis/Kconfig
@@ -1,6 +1,6 @@
1config MANTIS_CORE 1config MANTIS_CORE
2 tristate "Mantis/Hopper PCI bridge based devices" 2 tristate "Mantis/Hopper PCI bridge based devices"
3 depends on PCI && I2C && INPUT 3 depends on PCI && I2C && INPUT && IR_CORE
4 4
5 help 5 help
 6 Support for PCI cards based on the Mantis and Hopper PCI bridge. 6 Support for PCI cards based on the Mantis and Hopper PCI bridge.
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index d93468cd3a85..ff3b0fa901b3 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1098,33 +1098,26 @@ EXPORT_SYMBOL_GPL(smscore_onresponse);
1098 * 1098 *
1099 * @return pointer to descriptor on success, NULL on error. 1099 * @return pointer to descriptor on success, NULL on error.
1100 */ 1100 */
1101struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev) 1101
1102struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
1102{ 1103{
1103 struct smscore_buffer_t *cb = NULL; 1104 struct smscore_buffer_t *cb = NULL;
1104 unsigned long flags; 1105 unsigned long flags;
1105 1106
1106 DEFINE_WAIT(wait);
1107
1108 spin_lock_irqsave(&coredev->bufferslock, flags); 1107 spin_lock_irqsave(&coredev->bufferslock, flags);
1109 1108 if (!list_empty(&coredev->buffers)) {
1110 /* This function must return a valid buffer, since the buffer list is 1109 cb = (struct smscore_buffer_t *) coredev->buffers.next;
1111 * finite, we check that there is an available buffer, if not, we wait 1110 list_del(&cb->entry);
1112 * until such buffer become available.
1113 */
1114
1115 prepare_to_wait(&coredev->buffer_mng_waitq, &wait, TASK_INTERRUPTIBLE);
1116 if (list_empty(&coredev->buffers)) {
1117 spin_unlock_irqrestore(&coredev->bufferslock, flags);
1118 schedule();
1119 spin_lock_irqsave(&coredev->bufferslock, flags);
1120 } 1111 }
1112 spin_unlock_irqrestore(&coredev->bufferslock, flags);
1113 return cb;
1114}
1121 1115
1122 finish_wait(&coredev->buffer_mng_waitq, &wait); 1116struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
1123 1117{
1124 cb = (struct smscore_buffer_t *) coredev->buffers.next; 1118 struct smscore_buffer_t *cb = NULL;
1125 list_del(&cb->entry);
1126 1119
1127 spin_unlock_irqrestore(&coredev->bufferslock, flags); 1120 wait_event(coredev->buffer_mng_waitq, (cb = get_entry(coredev)));
1128 1121
1129 return cb; 1122 return cb;
1130} 1123}
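The rewritten smscore_getbuffer() above leans on wait_event() re-evaluating its condition after every wakeup, so the assignment inside the condition both tests for and captures a free buffer. A sketch under that assumption (example_getbuffer() is illustrative; releasers are expected to wake buffer_mng_waitq after returning a buffer to the list):

	struct smscore_buffer_t *example_getbuffer(struct smscore_device_t *coredev)
	{
		struct smscore_buffer_t *cb;

		/* Sleeps until get_entry() hands back a buffer; get_entry()
		 * takes bufferslock itself, so no lock is held while sleeping. */
		wait_event(coredev->buffer_mng_waitq,
			   (cb = get_entry(coredev)) != NULL);
		return cb;
	}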
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 67a4ec8768a6..4ce541a5eb47 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -395,7 +395,7 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
395 radio->registers[POWERCFG] = POWERCFG_ENABLE; 395 radio->registers[POWERCFG] = POWERCFG_ENABLE;
396 if (si470x_set_register(radio, POWERCFG) < 0) { 396 if (si470x_set_register(radio, POWERCFG) < 0) {
397 retval = -EIO; 397 retval = -EIO;
398 goto err_all; 398 goto err_video;
399 } 399 }
400 msleep(110); 400 msleep(110);
401 401
diff --git a/drivers/media/video/cx231xx/Makefile b/drivers/media/video/cx231xx/Makefile
index 755dd0ce65ff..6f2b57384488 100644
--- a/drivers/media/video/cx231xx/Makefile
+++ b/drivers/media/video/cx231xx/Makefile
@@ -11,4 +11,5 @@ EXTRA_CFLAGS += -Idrivers/media/video
11EXTRA_CFLAGS += -Idrivers/media/common/tuners 11EXTRA_CFLAGS += -Idrivers/media/common/tuners
12EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core 12EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
13EXTRA_CFLAGS += -Idrivers/media/dvb/frontends 13EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
14EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-usb
14 15
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 6bdc0ef18119..f2a4900014bc 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -32,6 +32,7 @@
32#include <media/v4l2-chip-ident.h> 32#include <media/v4l2-chip-ident.h>
33 33
34#include <media/cx25840.h> 34#include <media/cx25840.h>
35#include "dvb-usb-ids.h"
35#include "xc5000.h" 36#include "xc5000.h"
36 37
37#include "cx231xx.h" 38#include "cx231xx.h"
@@ -175,6 +176,8 @@ struct usb_device_id cx231xx_id_table[] = {
175 .driver_info = CX231XX_BOARD_CNXT_RDE_250}, 176 .driver_info = CX231XX_BOARD_CNXT_RDE_250},
176 {USB_DEVICE(0x0572, 0x58A1), 177 {USB_DEVICE(0x0572, 0x58A1),
177 .driver_info = CX231XX_BOARD_CNXT_RDU_250}, 178 .driver_info = CX231XX_BOARD_CNXT_RDU_250},
179 {USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000,0x4fff),
180 .driver_info = CX231XX_BOARD_UNKNOWN},
178 {}, 181 {},
179}; 182};
180 183
@@ -226,14 +229,16 @@ void cx231xx_pre_card_setup(struct cx231xx *dev)
226 dev->board.name, dev->model); 229 dev->board.name, dev->model);
227 230
228 /* set the direction for GPIO pins */ 231 /* set the direction for GPIO pins */
229 cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1); 232 if (dev->board.tuner_gpio) {
230 cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1); 233 cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
231 cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1); 234 cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
235 cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
232 236
233 /* request some modules if any required */ 237 /* request some modules if any required */
234 238
235 /* reset the Tuner */ 239 /* reset the Tuner */
236 cx231xx_gpio_set(dev, dev->board.tuner_gpio); 240 cx231xx_gpio_set(dev, dev->board.tuner_gpio);
241 }
237 242
238 /* set the mode to Analog mode initially */ 243 /* set the mode to Analog mode initially */
239 cx231xx_set_mode(dev, CX231XX_ANALOG_MODE); 244 cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index 86ca8c2359dd..f5a3e74c3c7c 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -1996,7 +1996,7 @@ static int cx25840_probe(struct i2c_client *client,
1996 1996
1997 state->volume = v4l2_ctrl_new_std(&state->hdl, 1997 state->volume = v4l2_ctrl_new_std(&state->hdl,
1998 &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME, 1998 &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME,
1999 0, 65335, 65535 / 100, default_volume); 1999 0, 65535, 65535 / 100, default_volume);
2000 state->mute = v4l2_ctrl_new_std(&state->hdl, 2000 state->mute = v4l2_ctrl_new_std(&state->hdl,
2001 &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_MUTE, 2001 &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_MUTE,
2002 0, 1, 1, 0); 2002 0, 1, 1, 0);
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 99dbae117591..0fa85cbefbb1 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -17,7 +17,7 @@ config VIDEO_CX88
17 17
18config VIDEO_CX88_ALSA 18config VIDEO_CX88_ALSA
19 tristate "Conexant 2388x DMA audio support" 19 tristate "Conexant 2388x DMA audio support"
20 depends on VIDEO_CX88 && SND && EXPERIMENTAL 20 depends on VIDEO_CX88 && SND
21 select SND_PCM 21 select SND_PCM
22 ---help--- 22 ---help---
23 This is a video4linux driver for direct (DMA) audio on 23 This is a video4linux driver for direct (DMA) audio on
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index b9846106913e..78abc1c1f9d5 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -223,6 +223,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
223 usb_rcvintpipe(dev, ep->bEndpointAddress), 223 usb_rcvintpipe(dev, ep->bEndpointAddress),
224 buffer, buffer_len, 224 buffer, buffer_len,
225 int_irq, (void *)gspca_dev, interval); 225 int_irq, (void *)gspca_dev, interval);
226 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
226 gspca_dev->int_urb = urb; 227 gspca_dev->int_urb = urb;
227 ret = usb_submit_urb(urb, GFP_KERNEL); 228 ret = usb_submit_urb(urb, GFP_KERNEL);
228 if (ret < 0) { 229 if (ret < 0) {
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 83a718f0f3f9..9052d5702556 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2357,8 +2357,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
2357 (data[33] << 10); 2357 (data[33] << 10);
2358 avg_lum >>= 9; 2358 avg_lum >>= 9;
2359 atomic_set(&sd->avg_lum, avg_lum); 2359 atomic_set(&sd->avg_lum, avg_lum);
2360 gspca_frame_add(gspca_dev, LAST_PACKET, 2360 gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
2361 data, len);
2362 return; 2361 return;
2363 } 2362 }
2364 if (gspca_dev->last_packet_type == LAST_PACKET) { 2363 if (gspca_dev->last_packet_type == LAST_PACKET) {
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index be03a712731c..f0316d02f09f 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -466,6 +466,8 @@ static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
466 struct fb_vblank vblank; 466 struct fb_vblank vblank;
467 u32 trace; 467 u32 trace;
468 468
469 memset(&vblank, 0, sizeof(struct fb_vblank));
470
469 vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT | 471 vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
470 FB_VBLANK_HAVE_VSYNC; 472 FB_VBLANK_HAVE_VSYNC;
471 trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16; 473 trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16;
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index 4525335f9bd4..a7210d981388 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -239,7 +239,7 @@ static int device_process(struct m2mtest_ctx *ctx,
239 return -EFAULT; 239 return -EFAULT;
240 } 240 }
241 241
242 if (in_buf->vb.size < out_buf->vb.size) { 242 if (in_buf->vb.size > out_buf->vb.size) {
243 v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n"); 243 v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
244 return -EINVAL; 244 return -EINVAL;
245 } 245 }
@@ -1014,6 +1014,7 @@ static int m2mtest_remove(struct platform_device *pdev)
1014 v4l2_m2m_release(dev->m2m_dev); 1014 v4l2_m2m_release(dev->m2m_dev);
1015 del_timer_sync(&dev->timer); 1015 del_timer_sync(&dev->timer);
1016 video_unregister_device(dev->vfd); 1016 video_unregister_device(dev->vfd);
1017 video_device_release(dev->vfd);
1017 v4l2_device_unregister(&dev->v4l2_dev); 1018 v4l2_device_unregister(&dev->v4l2_dev);
1018 kfree(dev); 1019 kfree(dev);
1019 1020
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index 758a4db27d65..c71af4e0e517 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -447,6 +447,9 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
447 dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n", 447 dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n",
448 __func__, rect.left, rect.top, rect.width, rect.height); 448 __func__, rect.left, rect.top, rect.width, rect.height);
449 449
450 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
451 return -EINVAL;
452
450 ret = mt9m111_make_rect(client, &rect); 453 ret = mt9m111_make_rect(client, &rect);
451 if (!ret) 454 if (!ret)
452 mt9m111->rect = rect; 455 mt9m111->rect = rect;
@@ -466,12 +469,14 @@ static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
466 469
467static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) 470static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
468{ 471{
472 if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
473 return -EINVAL;
474
469 a->bounds.left = MT9M111_MIN_DARK_COLS; 475 a->bounds.left = MT9M111_MIN_DARK_COLS;
470 a->bounds.top = MT9M111_MIN_DARK_ROWS; 476 a->bounds.top = MT9M111_MIN_DARK_ROWS;
471 a->bounds.width = MT9M111_MAX_WIDTH; 477 a->bounds.width = MT9M111_MAX_WIDTH;
472 a->bounds.height = MT9M111_MAX_HEIGHT; 478 a->bounds.height = MT9M111_MAX_HEIGHT;
473 a->defrect = a->bounds; 479 a->defrect = a->bounds;
474 a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
475 a->pixelaspect.numerator = 1; 480 a->pixelaspect.numerator = 1;
476 a->pixelaspect.denominator = 1; 481 a->pixelaspect.denominator = 1;
477 482
@@ -487,6 +492,7 @@ static int mt9m111_g_fmt(struct v4l2_subdev *sd,
487 mf->width = mt9m111->rect.width; 492 mf->width = mt9m111->rect.width;
488 mf->height = mt9m111->rect.height; 493 mf->height = mt9m111->rect.height;
489 mf->code = mt9m111->fmt->code; 494 mf->code = mt9m111->fmt->code;
495 mf->colorspace = mt9m111->fmt->colorspace;
490 mf->field = V4L2_FIELD_NONE; 496 mf->field = V4L2_FIELD_NONE;
491 497
492 return 0; 498 return 0;
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index e7cd23cd6394..b48473c7896b 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -402,9 +402,6 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd,
402 if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATC) 402 if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATC)
403 return -EINVAL; 403 return -EINVAL;
404 break; 404 break;
405 case 0:
406 /* No format change, only geometry */
407 break;
408 default: 405 default:
409 return -EINVAL; 406 return -EINVAL;
410 } 407 }
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 66ff174151b5..b6ea67221d1d 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -378,6 +378,9 @@ static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb,
378 378
379 spin_lock_irqsave(&pcdev->lock, flags); 379 spin_lock_irqsave(&pcdev->lock, flags);
380 380
381 if (*fb_active == NULL)
382 goto out;
383
381 vb = &(*fb_active)->vb; 384 vb = &(*fb_active)->vb;
382 dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 385 dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
383 vb, vb->baddr, vb->bsize); 386 vb, vb->baddr, vb->bsize);
@@ -402,6 +405,7 @@ static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb,
402 405
403 *fb_active = buf; 406 *fb_active = buf;
404 407
408out:
405 spin_unlock_irqrestore(&pcdev->lock, flags); 409 spin_unlock_irqrestore(&pcdev->lock, flags);
406} 410}
407 411
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
index 1b992b847198..55ea914c7fcd 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
@@ -513,7 +513,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr,
513 if (ret >= 0) { 513 if (ret >= 0) {
514 ret = pvr2_ctrl_range_check(cptr,*valptr); 514 ret = pvr2_ctrl_range_check(cptr,*valptr);
515 } 515 }
516 if (maskptr) *maskptr = ~0; 516 *maskptr = ~0;
517 } else if (cptr->info->type == pvr2_ctl_bool) { 517 } else if (cptr->info->type == pvr2_ctl_bool) {
518 ret = parse_token(ptr,len,valptr,boolNames, 518 ret = parse_token(ptr,len,valptr,boolNames,
519 ARRAY_SIZE(boolNames)); 519 ARRAY_SIZE(boolNames));
@@ -522,7 +522,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr,
522 } else if (ret == 0) { 522 } else if (ret == 0) {
523 *valptr = (*valptr & 1) ? !0 : 0; 523 *valptr = (*valptr & 1) ? !0 : 0;
524 } 524 }
525 if (maskptr) *maskptr = 1; 525 *maskptr = 1;
526 } else if (cptr->info->type == pvr2_ctl_enum) { 526 } else if (cptr->info->type == pvr2_ctl_enum) {
527 ret = parse_token( 527 ret = parse_token(
528 ptr,len,valptr, 528 ptr,len,valptr,
@@ -531,7 +531,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr,
531 if (ret >= 0) { 531 if (ret >= 0) {
532 ret = pvr2_ctrl_range_check(cptr,*valptr); 532 ret = pvr2_ctrl_range_check(cptr,*valptr);
533 } 533 }
534 if (maskptr) *maskptr = ~0; 534 *maskptr = ~0;
535 } else if (cptr->info->type == pvr2_ctl_bitmask) { 535 } else if (cptr->info->type == pvr2_ctl_bitmask) {
536 ret = parse_tlist( 536 ret = parse_tlist(
537 ptr,len,maskptr,valptr, 537 ptr,len,maskptr,valptr,
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index b151c7be8a50..6961c55baf9b 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -393,6 +393,37 @@ static void fimc_set_yuv_order(struct fimc_ctx *ctx)
393 dbg("ctx->out_order_1p= %d", ctx->out_order_1p); 393 dbg("ctx->out_order_1p= %d", ctx->out_order_1p);
394} 394}
395 395
396static void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
397{
398 struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
399
400 f->dma_offset.y_h = f->offs_h;
401 if (!variant->pix_hoff)
402 f->dma_offset.y_h *= (f->fmt->depth >> 3);
403
404 f->dma_offset.y_v = f->offs_v;
405
406 f->dma_offset.cb_h = f->offs_h;
407 f->dma_offset.cb_v = f->offs_v;
408
409 f->dma_offset.cr_h = f->offs_h;
410 f->dma_offset.cr_v = f->offs_v;
411
412 if (!variant->pix_hoff) {
413 if (f->fmt->planes_cnt == 3) {
414 f->dma_offset.cb_h >>= 1;
415 f->dma_offset.cr_h >>= 1;
416 }
417 if (f->fmt->color == S5P_FIMC_YCBCR420) {
418 f->dma_offset.cb_v >>= 1;
419 f->dma_offset.cr_v >>= 1;
420 }
421 }
422
423 dbg("in_offset: color= %d, y_h= %d, y_v= %d",
424 f->fmt->color, f->dma_offset.y_h, f->dma_offset.y_v);
425}
426
396/** 427/**
397 * fimc_prepare_config - check dimensions, operation and color mode 428 * fimc_prepare_config - check dimensions, operation and color mode
398 * and pre-calculate offset and the scaling coefficients. 429 * and pre-calculate offset and the scaling coefficients.
@@ -406,7 +437,6 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
406{ 437{
407 struct fimc_frame *s_frame, *d_frame; 438 struct fimc_frame *s_frame, *d_frame;
408 struct fimc_vid_buffer *buf = NULL; 439 struct fimc_vid_buffer *buf = NULL;
409 struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
410 int ret = 0; 440 int ret = 0;
411 441
412 s_frame = &ctx->s_frame; 442 s_frame = &ctx->s_frame;
@@ -419,61 +449,16 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
419 swap(d_frame->width, d_frame->height); 449 swap(d_frame->width, d_frame->height);
420 } 450 }
421 451
422 /* Prepare the output offset ratios for scaler. */ 452 /* Prepare the DMA offset ratios for scaler. */
423 d_frame->dma_offset.y_h = d_frame->offs_h; 453 fimc_prepare_dma_offset(ctx, &ctx->s_frame);
424 if (!variant->pix_hoff) 454 fimc_prepare_dma_offset(ctx, &ctx->d_frame);
425 d_frame->dma_offset.y_h *= (d_frame->fmt->depth >> 3);
426
427 d_frame->dma_offset.y_v = d_frame->offs_v;
428
429 d_frame->dma_offset.cb_h = d_frame->offs_h;
430 d_frame->dma_offset.cb_v = d_frame->offs_v;
431
432 d_frame->dma_offset.cr_h = d_frame->offs_h;
433 d_frame->dma_offset.cr_v = d_frame->offs_v;
434 455
435 if (!variant->pix_hoff && d_frame->fmt->planes_cnt == 3) {
436 d_frame->dma_offset.cb_h >>= 1;
437 d_frame->dma_offset.cb_v >>= 1;
438 d_frame->dma_offset.cr_h >>= 1;
439 d_frame->dma_offset.cr_v >>= 1;
440 }
441
442 dbg("out offset: color= %d, y_h= %d, y_v= %d",
443 d_frame->fmt->color,
444 d_frame->dma_offset.y_h, d_frame->dma_offset.y_v);
445
446 /* Prepare the input offset ratios for scaler. */
447 s_frame->dma_offset.y_h = s_frame->offs_h;
448 if (!variant->pix_hoff)
449 s_frame->dma_offset.y_h *= (s_frame->fmt->depth >> 3);
450 s_frame->dma_offset.y_v = s_frame->offs_v;
451
452 s_frame->dma_offset.cb_h = s_frame->offs_h;
453 s_frame->dma_offset.cb_v = s_frame->offs_v;
454
455 s_frame->dma_offset.cr_h = s_frame->offs_h;
456 s_frame->dma_offset.cr_v = s_frame->offs_v;
457
458 if (!variant->pix_hoff && s_frame->fmt->planes_cnt == 3) {
459 s_frame->dma_offset.cb_h >>= 1;
460 s_frame->dma_offset.cb_v >>= 1;
461 s_frame->dma_offset.cr_h >>= 1;
462 s_frame->dma_offset.cr_v >>= 1;
463 }
464
465 dbg("in offset: color= %d, y_h= %d, y_v= %d",
466 s_frame->fmt->color, s_frame->dma_offset.y_h,
467 s_frame->dma_offset.y_v);
468
469 fimc_set_yuv_order(ctx);
470
471 /* Check against the scaler ratio. */
472 if (s_frame->height > (SCALER_MAX_VRATIO * d_frame->height) || 456 if (s_frame->height > (SCALER_MAX_VRATIO * d_frame->height) ||
473 s_frame->width > (SCALER_MAX_HRATIO * d_frame->width)) { 457 s_frame->width > (SCALER_MAX_HRATIO * d_frame->width)) {
474 err("out of scaler range"); 458 err("out of scaler range");
475 return -EINVAL; 459 return -EINVAL;
476 } 460 }
461 fimc_set_yuv_order(ctx);
477 } 462 }
478 463
479 /* Input DMA mode is not allowed when the scaler is disabled. */ 464 /* Input DMA mode is not allowed when the scaler is disabled. */
@@ -822,7 +807,8 @@ static int fimc_m2m_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
822 } else { 807 } else {
823 v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev, 808 v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev,
824 "Wrong buffer/video queue type (%d)\n", f->type); 809 "Wrong buffer/video queue type (%d)\n", f->type);
825 return -EINVAL; 810 ret = -EINVAL;
811 goto s_fmt_out;
826 } 812 }
827 813
828 pix = &f->fmt.pix; 814 pix = &f->fmt.pix;
@@ -1414,8 +1400,10 @@ static int fimc_probe(struct platform_device *pdev)
1414 } 1400 }
1415 1401
1416 fimc->work_queue = create_workqueue(dev_name(&fimc->pdev->dev)); 1402 fimc->work_queue = create_workqueue(dev_name(&fimc->pdev->dev));
1417 if (!fimc->work_queue) 1403 if (!fimc->work_queue) {
1404 ret = -ENOMEM;
1418 goto err_irq; 1405 goto err_irq;
1406 }
1419 1407
1420 ret = fimc_register_m2m_device(fimc); 1408 ret = fimc_register_m2m_device(fimc);
1421 if (ret) 1409 if (ret)
@@ -1492,6 +1480,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = {
1492}; 1480};
1493 1481
1494static struct samsung_fimc_variant fimc01_variant_s5pv210 = { 1482static struct samsung_fimc_variant fimc01_variant_s5pv210 = {
1483 .pix_hoff = 1,
1495 .has_inp_rot = 1, 1484 .has_inp_rot = 1,
1496 .has_out_rot = 1, 1485 .has_out_rot = 1,
1497 .min_inp_pixsize = 16, 1486 .min_inp_pixsize = 16,
@@ -1506,6 +1495,7 @@ static struct samsung_fimc_variant fimc01_variant_s5pv210 = {
1506}; 1495};
1507 1496
1508static struct samsung_fimc_variant fimc2_variant_s5pv210 = { 1497static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
1498 .pix_hoff = 1,
1509 .min_inp_pixsize = 16, 1499 .min_inp_pixsize = 16,
1510 .min_out_pixsize = 32, 1500 .min_out_pixsize = 32,
1511 1501
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index ec697fcd406e..bb8d83d8ddaf 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -4323,13 +4323,13 @@ struct saa7134_board saa7134_boards[] = {
4323 }, 4323 },
4324 [SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM] = { 4324 [SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM] = {
4325 /* Beholder Intl. Ltd. 2008 */ 4325 /* Beholder Intl. Ltd. 2008 */
4326 /*Dmitry Belimov <d.belimov@gmail.com> */ 4326 /* Dmitry Belimov <d.belimov@gmail.com> */
4327 .name = "Beholder BeholdTV Columbus TVFM", 4327 .name = "Beholder BeholdTV Columbus TV/FM",
4328 .audio_clock = 0x00187de7, 4328 .audio_clock = 0x00187de7,
4329 .tuner_type = TUNER_ALPS_TSBE5_PAL, 4329 .tuner_type = TUNER_ALPS_TSBE5_PAL,
4330 .radio_type = UNSET, 4330 .radio_type = TUNER_TEA5767,
4331 .tuner_addr = ADDR_UNSET, 4331 .tuner_addr = 0xc2 >> 1,
4332 .radio_addr = ADDR_UNSET, 4332 .radio_addr = 0xc0 >> 1,
4333 .tda9887_conf = TDA9887_PRESENT, 4333 .tda9887_conf = TDA9887_PRESENT,
4334 .gpiomask = 0x000A8004, 4334 .gpiomask = 0x000A8004,
4335 .inputs = {{ 4335 .inputs = {{
diff --git a/drivers/media/video/saa7164/saa7164-buffer.c b/drivers/media/video/saa7164/saa7164-buffer.c
index 5713f3a4b76c..ddd25d32723d 100644
--- a/drivers/media/video/saa7164/saa7164-buffer.c
+++ b/drivers/media/video/saa7164/saa7164-buffer.c
@@ -136,10 +136,11 @@ ret:
136int saa7164_buffer_dealloc(struct saa7164_tsport *port, 136int saa7164_buffer_dealloc(struct saa7164_tsport *port,
137 struct saa7164_buffer *buf) 137 struct saa7164_buffer *buf)
138{ 138{
139 struct saa7164_dev *dev = port->dev; 139 struct saa7164_dev *dev;
140 140
141 if ((buf == 0) || (port == 0)) 141 if (!buf || !port)
142 return SAA_ERR_BAD_PARAMETER; 142 return SAA_ERR_BAD_PARAMETER;
143 dev = port->dev;
143 144
144 dprintk(DBGLVL_BUF, "%s() deallocating buffer @ 0x%p\n", __func__, buf); 145 dprintk(DBGLVL_BUF, "%s() deallocating buffer @ 0x%p\n", __func__, buf);
145 146
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 8bdd940f32e6..2ac85d8984f0 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -486,6 +486,12 @@ static int uvc_parse_format(struct uvc_device *dev,
486 max(frame->dwFrameInterval[0], 486 max(frame->dwFrameInterval[0],
487 frame->dwDefaultFrameInterval)); 487 frame->dwDefaultFrameInterval));
488 488
489 if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) {
490 frame->bFrameIntervalType = 1;
491 frame->dwFrameInterval[0] =
492 frame->dwDefaultFrameInterval;
493 }
494
489 uvc_trace(UVC_TRACE_DESCR, "- %ux%u (%u.%u fps)\n", 495 uvc_trace(UVC_TRACE_DESCR, "- %ux%u (%u.%u fps)\n",
490 frame->wWidth, frame->wHeight, 496 frame->wWidth, frame->wHeight,
491 10000000/frame->dwDefaultFrameInterval, 497 10000000/frame->dwDefaultFrameInterval,
@@ -2026,6 +2032,15 @@ static struct usb_device_id uvc_ids[] = {
2026 .bInterfaceClass = USB_CLASS_VENDOR_SPEC, 2032 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
2027 .bInterfaceSubClass = 1, 2033 .bInterfaceSubClass = 1,
2028 .bInterfaceProtocol = 0 }, 2034 .bInterfaceProtocol = 0 },
2035 /* Chicony CNF7129 (Asus EEE 100HE) */
2036 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2037 | USB_DEVICE_ID_MATCH_INT_INFO,
2038 .idVendor = 0x04f2,
2039 .idProduct = 0xb071,
2040 .bInterfaceClass = USB_CLASS_VIDEO,
2041 .bInterfaceSubClass = 1,
2042 .bInterfaceProtocol = 0,
2043 .driver_info = UVC_QUIRK_RESTRICT_FRAME_RATE },
2029 /* Alcor Micro AU3820 (Future Boy PC USB Webcam) */ 2044 /* Alcor Micro AU3820 (Future Boy PC USB Webcam) */
2030 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 2045 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2031 | USB_DEVICE_ID_MATCH_INT_INFO, 2046 | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2091,6 +2106,15 @@ static struct usb_device_id uvc_ids[] = {
2091 .bInterfaceProtocol = 0, 2106 .bInterfaceProtocol = 0,
2092 .driver_info = UVC_QUIRK_PROBE_MINMAX 2107 .driver_info = UVC_QUIRK_PROBE_MINMAX
2093 | UVC_QUIRK_PROBE_DEF }, 2108 | UVC_QUIRK_PROBE_DEF },
2109 /* IMC Networks (Medion Akoya) */
2110 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2111 | USB_DEVICE_ID_MATCH_INT_INFO,
2112 .idVendor = 0x13d3,
2113 .idProduct = 0x5103,
2114 .bInterfaceClass = USB_CLASS_VIDEO,
2115 .bInterfaceSubClass = 1,
2116 .bInterfaceProtocol = 0,
2117 .driver_info = UVC_QUIRK_STREAM_NO_FID },
2094 /* Syntek (HP Spartan) */ 2118 /* Syntek (HP Spartan) */
2095 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 2119 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
2096 | USB_DEVICE_ID_MATCH_INT_INFO, 2120 | USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index bdacf3beabf5..892e0e51916c 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -182,6 +182,7 @@ struct uvc_xu_control {
182#define UVC_QUIRK_IGNORE_SELECTOR_UNIT 0x00000020 182#define UVC_QUIRK_IGNORE_SELECTOR_UNIT 0x00000020
183#define UVC_QUIRK_FIX_BANDWIDTH 0x00000080 183#define UVC_QUIRK_FIX_BANDWIDTH 0x00000080
184#define UVC_QUIRK_PROBE_DEF 0x00000100 184#define UVC_QUIRK_PROBE_DEF 0x00000100
185#define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200
185 186
186/* Format flags */ 187/* Format flags */
187#define UVC_FMT_FLAG_COMPRESSED 0x00000001 188#define UVC_FMT_FLAG_COMPRESSED 0x00000001
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index 372b87efcd05..6ff9e4bac3ea 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -393,8 +393,10 @@ void videobuf_dma_contig_free(struct videobuf_queue *q,
393 } 393 }
394 394
395 /* read() method */ 395 /* read() method */
396 dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle); 396 if (mem->vaddr) {
397 mem->vaddr = NULL; 397 dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
398 mem->vaddr = NULL;
399 }
398} 400}
399EXPORT_SYMBOL_GPL(videobuf_dma_contig_free); 401EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
400 402
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index 06f9a9c2a39a..2ad0bc252b0e 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -94,7 +94,7 @@ err:
94 * must free the memory. 94 * must free the memory.
95 */ 95 */
96static struct scatterlist *videobuf_pages_to_sg(struct page **pages, 96static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
97 int nr_pages, int offset) 97 int nr_pages, int offset, size_t size)
98{ 98{
99 struct scatterlist *sglist; 99 struct scatterlist *sglist;
100 int i; 100 int i;
@@ -110,12 +110,14 @@ static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
110 /* DMA to highmem pages might not work */ 110 /* DMA to highmem pages might not work */
111 goto highmem; 111 goto highmem;
112 sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset); 112 sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset);
113 size -= PAGE_SIZE - offset;
113 for (i = 1; i < nr_pages; i++) { 114 for (i = 1; i < nr_pages; i++) {
114 if (NULL == pages[i]) 115 if (NULL == pages[i])
115 goto nopage; 116 goto nopage;
116 if (PageHighMem(pages[i])) 117 if (PageHighMem(pages[i]))
117 goto highmem; 118 goto highmem;
118 sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0); 119 sg_set_page(&sglist[i], pages[i], min(PAGE_SIZE, size), 0);
120 size -= min(PAGE_SIZE, size);
119 } 121 }
120 return sglist; 122 return sglist;
121 123
@@ -170,7 +172,8 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
170 172
171 first = (data & PAGE_MASK) >> PAGE_SHIFT; 173 first = (data & PAGE_MASK) >> PAGE_SHIFT;
172 last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT; 174 last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
173 dma->offset = data & ~PAGE_MASK; 175 dma->offset = data & ~PAGE_MASK;
176 dma->size = size;
174 dma->nr_pages = last-first+1; 177 dma->nr_pages = last-first+1;
175 dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL); 178 dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
176 if (NULL == dma->pages) 179 if (NULL == dma->pages)
@@ -252,7 +255,7 @@ int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
252 255
253 if (dma->pages) { 256 if (dma->pages) {
254 dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages, 257 dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
255 dma->offset); 258 dma->offset, dma->size);
256 } 259 }
257 if (dma->vaddr) { 260 if (dma->vaddr) {
258 dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr, 261 dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr,
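
The videobuf-dma-sg change above threads the total buffer size into videobuf_pages_to_sg() so that entries after the first are clamped to min(PAGE_SIZE, remaining size) instead of always a full page. A small worked example of that arithmetic, with values chosen only for illustration: a 10000-byte user buffer starting 100 bytes into its first page spans three pages, and the entry lengths come out as 3996, 4096 and 1908 bytes.

/* Worked example of the per-entry length arithmetic above; PAGE_SIZE,
 * offset and size values are illustrative. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	size_t offset = 100;     /* data & ~PAGE_MASK              */
	size_t size   = 10000;   /* total bytes to map             */
	size_t len;
	int i, nr_pages = 3;     /* last - first + 1 for this case */

	for (i = 0; i < nr_pages; i++) {
		if (i == 0)
			len = PAGE_SIZE - offset;           /* first entry */
		else
			len = size < PAGE_SIZE ? size : PAGE_SIZE;
		printf("sg entry %d: %zu bytes\n", i, len);
		size -= len;                                /* bytes left to map */
	}
	return 0;
}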
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 04028a9ee082..428377a5a6f5 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -429,24 +429,25 @@ static void max8925_irq_sync_unlock(unsigned int irq)
429 irq_tsc = cache_tsc; 429 irq_tsc = cache_tsc;
430 for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { 430 for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) {
431 irq_data = &max8925_irqs[i]; 431 irq_data = &max8925_irqs[i];
432 /* 1 -- disable, 0 -- enable */
432 switch (irq_data->mask_reg) { 433 switch (irq_data->mask_reg) {
433 case MAX8925_CHG_IRQ1_MASK: 434 case MAX8925_CHG_IRQ1_MASK:
434 irq_chg[0] &= irq_data->enable; 435 irq_chg[0] &= ~irq_data->enable;
435 break; 436 break;
436 case MAX8925_CHG_IRQ2_MASK: 437 case MAX8925_CHG_IRQ2_MASK:
437 irq_chg[1] &= irq_data->enable; 438 irq_chg[1] &= ~irq_data->enable;
438 break; 439 break;
439 case MAX8925_ON_OFF_IRQ1_MASK: 440 case MAX8925_ON_OFF_IRQ1_MASK:
440 irq_on[0] &= irq_data->enable; 441 irq_on[0] &= ~irq_data->enable;
441 break; 442 break;
442 case MAX8925_ON_OFF_IRQ2_MASK: 443 case MAX8925_ON_OFF_IRQ2_MASK:
443 irq_on[1] &= irq_data->enable; 444 irq_on[1] &= ~irq_data->enable;
444 break; 445 break;
445 case MAX8925_RTC_IRQ_MASK: 446 case MAX8925_RTC_IRQ_MASK:
446 irq_rtc &= irq_data->enable; 447 irq_rtc &= ~irq_data->enable;
447 break; 448 break;
448 case MAX8925_TSC_IRQ_MASK: 449 case MAX8925_TSC_IRQ_MASK:
449 irq_tsc &= irq_data->enable; 450 irq_tsc &= ~irq_data->enable;
450 break; 451 break;
451 default: 452 default:
452 dev_err(chip->dev, "wrong IRQ\n"); 453 dev_err(chip->dev, "wrong IRQ\n");
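
In the max8925 fix above, a set bit in these mask registers disables the interrupt source (as the added "1 -- disable, 0 -- enable" comment notes), so unmasking a source means clearing its bit: "mask &= ~enable" does that, while the old "mask &= enable" left the wanted source masked and unmasked everything else. A short worked example with made-up register values:

/* Worked example of the masking fix above; register values are made up. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t mask   = 0xff;	/* all sources masked (1 = disable) */
	uint8_t enable = 0x04;	/* bit of the one source to enable  */

	/* old code: 0x04 -> wanted source still masked, all others unmasked */
	printf("mask &  enable = 0x%02x\n", (uint8_t)(mask & enable));
	/* fixed code: 0xfb -> only the wanted source unmasked */
	printf("mask & ~enable = 0x%02x\n", (uint8_t)(mask & ~enable));
	return 0;
}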
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 7dabe4dbd373..294183b6260b 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -394,8 +394,13 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
394 394
395 irq = irq - wm831x->irq_base; 395 irq = irq - wm831x->irq_base;
396 396
397 if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) 397 if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
398 return -EINVAL; 398 /* Ignore internal-only IRQs */
399 if (irq >= 0 && irq < WM831X_NUM_IRQS)
400 return 0;
401 else
402 return -EINVAL;
403 }
399 404
400 switch (type) { 405 switch (type) {
401 case IRQ_TYPE_EDGE_BOTH: 406 case IRQ_TYPE_EDGE_BOTH:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0b591b658243..b74331260744 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -368,7 +368,7 @@ config VMWARE_BALLOON
368 If unsure, say N. 368 If unsure, say N.
369 369
370 To compile this driver as a module, choose M here: the 370 To compile this driver as a module, choose M here: the
371 module will be called vmware_balloon. 371 module will be called vmw_balloon.
372 372
373config ARM_CHARLCD 373config ARM_CHARLCD
374 bool "ARM Ltd. Character LCD Driver" 374 bool "ARM Ltd. Character LCD Driver"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 255a80dc9d73..42eab95cde2a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -33,5 +33,5 @@ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
33obj-$(CONFIG_HMC6352) += hmc6352.o 33obj-$(CONFIG_HMC6352) += hmc6352.o
34obj-y += eeprom/ 34obj-y += eeprom/
35obj-y += cb710/ 35obj-y += cb710/
36obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o 36obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
37obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o 37obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index 714c6b487313..d5f3a3fd2319 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -190,7 +190,6 @@ static int __devexit bh1780_remove(struct i2c_client *client)
190 190
191 ddata = i2c_get_clientdata(client); 191 ddata = i2c_get_clientdata(client);
192 sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group); 192 sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
193 i2c_set_clientdata(client, NULL);
194 kfree(ddata); 193 kfree(ddata);
195 194
196 return 0; 195 return 0;
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmw_balloon.c
index 2a1e804a71aa..2a1e804a71aa 100644
--- a/drivers/misc/vmware_balloon.c
+++ b/drivers/misc/vmw_balloon.c
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index bd2755e8d9a3..f332c52968b7 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -362,9 +362,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
362 goto err; 362 goto err;
363 } 363 }
364 364
365 err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid); 365 if (ocr & R4_MEMORY_PRESENT
366 366 && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
367 if (!err) {
368 card->type = MMC_TYPE_SD_COMBO; 367 card->type = MMC_TYPE_SD_COMBO;
369 368
370 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || 369 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 5f3a599ead07..87226cd202a5 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -66,6 +66,7 @@
66#include <linux/clk.h> 66#include <linux/clk.h>
67#include <linux/atmel_pdc.h> 67#include <linux/atmel_pdc.h>
68#include <linux/gfp.h> 68#include <linux/gfp.h>
69#include <linux/highmem.h>
69 70
70#include <linux/mmc/host.h> 71#include <linux/mmc/host.h>
71 72
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 9a68ff4353a2..5a950b16d9e6 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host)
148 148
149 while (delay--) { 149 while (delay--) {
150 reg = readw(host->base + MMC_REG_STATUS); 150 reg = readw(host->base + MMC_REG_STATUS);
151 if (reg & STATUS_CARD_BUS_CLK_RUN) 151 if (reg & STATUS_CARD_BUS_CLK_RUN) {
152 /* Check twice before cut */ 152 /* Check twice before cut */
153 reg = readw(host->base + MMC_REG_STATUS); 153 reg = readw(host->base + MMC_REG_STATUS);
154 if (reg & STATUS_CARD_BUS_CLK_RUN) 154 if (reg & STATUS_CARD_BUS_CLK_RUN)
155 return 0; 155 return 0;
156 }
156 157
157 if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) 158 if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
158 return 0; 159 return 0;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 4a8776f8afdd..4526d2791f29 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2305,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev)
2305 int ret = 0; 2305 int ret = 0;
2306 struct platform_device *pdev = to_platform_device(dev); 2306 struct platform_device *pdev = to_platform_device(dev);
2307 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2307 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2308 pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
2309 2308
2310 if (host && host->suspended) 2309 if (host && host->suspended)
2311 return 0; 2310 return 0;
@@ -2324,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev)
2324 } 2323 }
2325 } 2324 }
2326 cancel_work_sync(&host->mmc_carddetect_work); 2325 cancel_work_sync(&host->mmc_carddetect_work);
2327 mmc_host_enable(host->mmc);
2328 ret = mmc_suspend_host(host->mmc); 2326 ret = mmc_suspend_host(host->mmc);
2327 mmc_host_enable(host->mmc);
2329 if (ret == 0) { 2328 if (ret == 0) {
2330 omap_hsmmc_disable_irq(host); 2329 omap_hsmmc_disable_irq(host);
2331 OMAP_HSMMC_WRITE(host->base, HCTL, 2330 OMAP_HSMMC_WRITE(host->base, HCTL,
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2e16e0a90a5e..976330de379e 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
1600 host->pio_active = XFER_NONE; 1600 host->pio_active = XFER_NONE;
1601 1601
1602#ifdef CONFIG_MMC_S3C_PIODMA 1602#ifdef CONFIG_MMC_S3C_PIODMA
1603 host->dodma = host->pdata->dma; 1603 host->dodma = host->pdata->use_dma;
1604#endif 1604#endif
1605 1605
1606 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1606 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 71ad4163b95e..aacb862ecc8a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -241,8 +241,10 @@ static struct sdhci_ops sdhci_s3c_ops = {
241static void sdhci_s3c_notify_change(struct platform_device *dev, int state) 241static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
242{ 242{
243 struct sdhci_host *host = platform_get_drvdata(dev); 243 struct sdhci_host *host = platform_get_drvdata(dev);
244 unsigned long flags;
245
244 if (host) { 246 if (host) {
245 spin_lock(&host->lock); 247 spin_lock_irqsave(&host->lock, flags);
246 if (state) { 248 if (state) {
247 dev_dbg(&dev->dev, "card inserted.\n"); 249 dev_dbg(&dev->dev, "card inserted.\n");
248 host->flags &= ~SDHCI_DEVICE_DEAD; 250 host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -253,7 +255,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
253 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 255 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
254 } 256 }
255 tasklet_schedule(&host->card_tasklet); 257 tasklet_schedule(&host->card_tasklet);
256 spin_unlock(&host->lock); 258 spin_unlock_irqrestore(&host->lock, flags);
257 } 259 }
258} 260}
259 261
@@ -481,8 +483,10 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
481 sdhci_remove_host(host, 1); 483 sdhci_remove_host(host, 1);
482 484
483 for (ptr = 0; ptr < 3; ptr++) { 485 for (ptr = 0; ptr < 3; ptr++) {
484 clk_disable(sc->clk_bus[ptr]); 486 if (sc->clk_bus[ptr]) {
485 clk_put(sc->clk_bus[ptr]); 487 clk_disable(sc->clk_bus[ptr]);
488 clk_put(sc->clk_bus[ptr]);
489 }
486 } 490 }
487 clk_disable(sc->clk_io); 491 clk_disable(sc->clk_io);
488 clk_put(sc->clk_io); 492 clk_put(sc->clk_io);
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index ee7d0a5a51c4..69d98e3bf6ab 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
164static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) 164static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
165{ 165{
166 struct mmc_data *data = host->data; 166 struct mmc_data *data = host->data;
167 void *sg_virt;
167 unsigned short *buf; 168 unsigned short *buf;
168 unsigned int count; 169 unsigned int count;
169 unsigned long flags; 170 unsigned long flags;
@@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
173 return; 174 return;
174 } 175 }
175 176
176 buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) + 177 sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
177 host->sg_off); 178 buf = (unsigned short *)(sg_virt + host->sg_off);
178 179
179 count = host->sg_ptr->length - host->sg_off; 180 count = host->sg_ptr->length - host->sg_off;
180 if (count > data->blksz) 181 if (count > data->blksz)
@@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
191 192
192 host->sg_off += count; 193 host->sg_off += count;
193 194
194 tmio_mmc_kunmap_atomic(host, &flags); 195 tmio_mmc_kunmap_atomic(sg_virt, &flags);
195 196
196 if (host->sg_off == host->sg_ptr->length) 197 if (host->sg_off == host->sg_ptr->length)
197 tmio_mmc_next_sg(host); 198 tmio_mmc_next_sg(host);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 64f7d5dfc106..0fedc78e3ea5 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -82,10 +82,7 @@
82 82
83#define ack_mmc_irqs(host, i) \ 83#define ack_mmc_irqs(host, i) \
84 do { \ 84 do { \
85 u32 mask;\ 85 sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
86 mask = sd_ctrl_read32((host), CTL_STATUS); \
87 mask &= ~((i) & TMIO_MASK_IRQ); \
88 sd_ctrl_write32((host), CTL_STATUS, mask); \
89 } while (0) 86 } while (0)
90 87
91 88
@@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
177 return --host->sg_len; 174 return --host->sg_len;
178} 175}
179 176
180static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host, 177static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
181 unsigned long *flags) 178 unsigned long *flags)
182{ 179{
183 struct scatterlist *sg = host->sg_ptr;
184
185 local_irq_save(*flags); 180 local_irq_save(*flags);
186 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 181 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
187} 182}
188 183
189static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host, 184static inline void tmio_mmc_kunmap_atomic(void *virt,
190 unsigned long *flags) 185 unsigned long *flags)
191{ 186{
192 kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ); 187 kunmap_atomic(virt, KM_BIO_SRC_IRQ);
193 local_irq_restore(*flags); 188 local_irq_restore(*flags);
194} 189}
195 190
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index a382e3dd0a5d..6fbeefa3a766 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -682,7 +682,6 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
682static int __devexit bf5xx_nand_remove(struct platform_device *pdev) 682static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
683{ 683{
684 struct bf5xx_nand_info *info = to_nand_info(pdev); 684 struct bf5xx_nand_info *info = to_nand_info(pdev);
685 struct mtd_info *mtd = NULL;
686 685
687 platform_set_drvdata(pdev, NULL); 686 platform_set_drvdata(pdev, NULL);
688 687
@@ -690,11 +689,7 @@ static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
690 * and their partitions, then go through freeing the 689 * and their partitions, then go through freeing the
691 * resources used 690 * resources used
692 */ 691 */
693 mtd = &info->mtd; 692 nand_release(&info->mtd);
694 if (mtd) {
695 nand_release(mtd);
696 kfree(mtd);
697 }
698 693
699 peripheral_free_list(bfin_nfc_pin_req); 694 peripheral_free_list(bfin_nfc_pin_req);
700 bf5xx_nand_dma_remove(info); 695 bf5xx_nand_dma_remove(info);
@@ -710,7 +705,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
710 struct nand_chip *chip = mtd->priv; 705 struct nand_chip *chip = mtd->priv;
711 int ret; 706 int ret;
712 707
713 ret = nand_scan_ident(mtd, 1); 708 ret = nand_scan_ident(mtd, 1, NULL);
714 if (ret) 709 if (ret)
715 return ret; 710 return ret;
716 711
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index fcf8ceb277d4..b2828e84d243 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -67,7 +67,9 @@
67#define NFC_V1_V2_CONFIG1_BIG (1 << 5) 67#define NFC_V1_V2_CONFIG1_BIG (1 << 5)
68#define NFC_V1_V2_CONFIG1_RST (1 << 6) 68#define NFC_V1_V2_CONFIG1_RST (1 << 6)
69#define NFC_V1_V2_CONFIG1_CE (1 << 7) 69#define NFC_V1_V2_CONFIG1_CE (1 << 7)
70#define NFC_V1_V2_CONFIG1_ONE_CYCLE (1 << 8) 70#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8)
71#define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9)
72#define NFC_V2_CONFIG1_FP_INT (1 << 11)
71 73
72#define NFC_V1_V2_CONFIG2_INT (1 << 15) 74#define NFC_V1_V2_CONFIG2_INT (1 << 15)
73 75
@@ -402,16 +404,16 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
402 /* Wait for operation to complete */ 404 /* Wait for operation to complete */
403 wait_op_done(host, true); 405 wait_op_done(host, true);
404 406
407 memcpy(host->data_buf, host->main_area0, 16);
408
405 if (this->options & NAND_BUSWIDTH_16) { 409 if (this->options & NAND_BUSWIDTH_16) {
406 void __iomem *main_buf = host->main_area0;
407 /* compress the ID info */ 410 /* compress the ID info */
408 writeb(readb(main_buf + 2), main_buf + 1); 411 host->data_buf[1] = host->data_buf[2];
409 writeb(readb(main_buf + 4), main_buf + 2); 412 host->data_buf[2] = host->data_buf[4];
410 writeb(readb(main_buf + 6), main_buf + 3); 413 host->data_buf[3] = host->data_buf[6];
411 writeb(readb(main_buf + 8), main_buf + 4); 414 host->data_buf[4] = host->data_buf[8];
412 writeb(readb(main_buf + 10), main_buf + 5); 415 host->data_buf[5] = host->data_buf[10];
413 } 416 }
414 memcpy(host->data_buf, host->main_area0, 16);
415} 417}
416 418
417static uint16_t get_dev_status_v3(struct mxc_nand_host *host) 419static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
@@ -729,27 +731,30 @@ static void preset_v1_v2(struct mtd_info *mtd)
729{ 731{
730 struct nand_chip *nand_chip = mtd->priv; 732 struct nand_chip *nand_chip = mtd->priv;
731 struct mxc_nand_host *host = nand_chip->priv; 733 struct mxc_nand_host *host = nand_chip->priv;
732 uint16_t tmp; 734 uint16_t config1 = 0;
733 735
734 /* enable interrupt, disable spare enable */ 736 if (nand_chip->ecc.mode == NAND_ECC_HW)
735 tmp = readw(NFC_V1_V2_CONFIG1); 737 config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
736 tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK; 738
737 tmp &= ~NFC_V1_V2_CONFIG1_SP_EN; 739 if (nfc_is_v21())
738 if (nand_chip->ecc.mode == NAND_ECC_HW) { 740 config1 |= NFC_V2_CONFIG1_FP_INT;
739 tmp |= NFC_V1_V2_CONFIG1_ECC_EN; 741
740 } else { 742 if (!cpu_is_mx21())
741 tmp &= ~NFC_V1_V2_CONFIG1_ECC_EN; 743 config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
742 }
743 744
744 if (nfc_is_v21() && mtd->writesize) { 745 if (nfc_is_v21() && mtd->writesize) {
746 uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
747
745 host->eccsize = get_eccsize(mtd); 748 host->eccsize = get_eccsize(mtd);
746 if (host->eccsize == 4) 749 if (host->eccsize == 4)
747 tmp |= NFC_V2_CONFIG1_ECC_MODE_4; 750 config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
751
752 config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
748 } else { 753 } else {
749 host->eccsize = 1; 754 host->eccsize = 1;
750 } 755 }
751 756
752 writew(tmp, NFC_V1_V2_CONFIG1); 757 writew(config1, NFC_V1_V2_CONFIG1);
753 /* preset operation */ 758 /* preset operation */
754 759
755 /* Unlock the internal RAM Buffer */ 760 /* Unlock the internal RAM Buffer */
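
In the mxc_nand preset change above, the new NFC_V2_CONFIG1_PPB() field encodes pages per block as ffs(pages_per_block) - 6, where pages_per_block is mtd->erasesize / mtd->writesize; with the 2-bit field that maps the 32/64/128/256 pages-per-block geometries to the values 0-3. A tiny worked example, assuming pages_per_block is a power of two:

/* Worked example of the PPB encoding above; assumes pages_per_block is
 * a power of two, as on the supported geometries. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	int ppb[] = { 32, 64, 128, 256 };
	unsigned int i;

	for (i = 0; i < sizeof(ppb) / sizeof(ppb[0]); i++)
		printf("pages_per_block = %3d -> PPB field = %d\n",
		       ppb[i], ffs(ppb[i]) - 6);
	return 0;
}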
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 133d51528f8d..513e0a76a4a7 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -413,7 +413,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
413 prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); 413 prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
414 } while (prefetch_status); 414 } while (prefetch_status);
415 /* disable and stop the PFPW engine */ 415 /* disable and stop the PFPW engine */
416 gpmc_prefetch_reset(); 416 gpmc_prefetch_reset(info->gpmc_cs);
417 417
418 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); 418 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
419 return 0; 419 return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4d89f3780207..4d01cda68844 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,6 +1320,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1320 goto fail_free_irq; 1320 goto fail_free_irq;
1321 } 1321 }
1322 1322
1323#ifdef CONFIG_MTD_PARTITIONS
1323 if (mtd_has_cmdlinepart()) { 1324 if (mtd_has_cmdlinepart()) {
1324 static const char *probes[] = { "cmdlinepart", NULL }; 1325 static const char *probes[] = { "cmdlinepart", NULL };
1325 struct mtd_partition *parts; 1326 struct mtd_partition *parts;
@@ -1332,6 +1333,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1332 } 1333 }
1333 1334
1334 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); 1335 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
1336#else
1337 return 0;
1338#endif
1335 1339
1336fail_free_irq: 1340fail_free_irq:
1337 free_irq(irq, info); 1341 free_irq(irq, info);
@@ -1364,7 +1368,9 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1364 platform_set_drvdata(pdev, NULL); 1368 platform_set_drvdata(pdev, NULL);
1365 1369
1366 del_mtd_device(mtd); 1370 del_mtd_device(mtd);
1371#ifdef CONFIG_MTD_PARTITIONS
1367 del_mtd_partitions(mtd); 1372 del_mtd_partitions(mtd);
1373#endif
1368 irq = platform_get_irq(pdev, 0); 1374 irq = platform_get_irq(pdev, 0);
1369 if (irq >= 0) 1375 if (irq >= 0)
1370 free_irq(irq, info); 1376 free_irq(irq, info);
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index cb443af3d45f..a460f1b748c2 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -554,14 +554,13 @@ static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
554 554
555 do { 555 do {
556 status = readl(base + S5PC110_DMA_TRANS_STATUS); 556 status = readl(base + S5PC110_DMA_TRANS_STATUS);
557 if (status & S5PC110_DMA_TRANS_STATUS_TE) {
558 writel(S5PC110_DMA_TRANS_CMD_TEC,
559 base + S5PC110_DMA_TRANS_CMD);
560 return -EIO;
561 }
557 } while (!(status & S5PC110_DMA_TRANS_STATUS_TD)); 562 } while (!(status & S5PC110_DMA_TRANS_STATUS_TD));
558 563
559 if (status & S5PC110_DMA_TRANS_STATUS_TE) {
560 writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD);
561 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
562 return -EIO;
563 }
564
565 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); 564 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
566 565
567 return 0; 566 return 0;
@@ -571,13 +570,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
571 unsigned char *buffer, int offset, size_t count) 570 unsigned char *buffer, int offset, size_t count)
572{ 571{
573 struct onenand_chip *this = mtd->priv; 572 struct onenand_chip *this = mtd->priv;
574 void __iomem *bufferram;
575 void __iomem *p; 573 void __iomem *p;
576 void *buf = (void *) buffer; 574 void *buf = (void *) buffer;
577 dma_addr_t dma_src, dma_dst; 575 dma_addr_t dma_src, dma_dst;
578 int err; 576 int err;
579 577
580 p = bufferram = this->base + area; 578 p = this->base + area;
581 if (ONENAND_CURRENT_BUFFERRAM(this)) { 579 if (ONENAND_CURRENT_BUFFERRAM(this)) {
582 if (area == ONENAND_DATARAM) 580 if (area == ONENAND_DATARAM)
583 p += this->writesize; 581 p += this->writesize;
@@ -621,7 +619,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
621normal: 619normal:
622 if (count != mtd->writesize) { 620 if (count != mtd->writesize) {
623 /* Copy the bufferram to memory to prevent unaligned access */ 621 /* Copy the bufferram to memory to prevent unaligned access */
624 memcpy(this->page_buf, bufferram, mtd->writesize); 622 memcpy(this->page_buf, p, mtd->writesize);
625 p = this->page_buf + offset; 623 p = this->page_buf + offset;
626 } 624 }
627 625
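
The samsung OneNAND change above moves the transfer-error test inside the polling loop: if the controller flags an error, the done bit may never be set, so checking only after the loop could spin forever. A minimal sketch of that "check the error bit while waiting" shape; read_status() here just walks a canned sequence of made-up status values and is not the driver's register accessor.

/* Sketch of the polling fix above: test the error bit inside the wait
 * loop, otherwise an error that prevents the done bit from ever being
 * set leaves the loop spinning forever.  Status values are simulated. */
#include <stdio.h>

#define STATUS_DONE  0x1
#define STATUS_ERROR 0x2

static unsigned int read_status(void)
{
	static const unsigned int seq[] = { 0, 0, STATUS_ERROR };
	static unsigned int i;

	return seq[i < 2 ? i++ : 2];
}

static int wait_for_done(void)
{
	unsigned int status;

	do {
		status = read_status();
		if (status & STATUS_ERROR)	/* DONE would never arrive */
			return -1;
	} while (!(status & STATUS_DONE));

	return 0;
}

int main(void)
{
	printf("wait_for_done() = %d\n", wait_for_done());
	return 0;
}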
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug
index 2246f154e2f7..61f6e5e40458 100644
--- a/drivers/mtd/ubi/Kconfig.debug
+++ b/drivers/mtd/ubi/Kconfig.debug
@@ -6,7 +6,7 @@ config MTD_UBI_DEBUG
6 depends on SYSFS 6 depends on SYSFS
7 depends on MTD_UBI 7 depends on MTD_UBI
8 select DEBUG_FS 8 select DEBUG_FS
9 select KALLSYMS_ALL 9 select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
10 help 10 help
11 This option enables UBI debugging. 11 This option enables UBI debugging.
12 12
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 4dfa6b90c21c..3d2d1a69e9a0 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -798,18 +798,18 @@ static int rename_volumes(struct ubi_device *ubi,
798 goto out_free; 798 goto out_free;
799 } 799 }
800 800
801 re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); 801 re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
802 if (!re) { 802 if (!re1) {
803 err = -ENOMEM; 803 err = -ENOMEM;
804 ubi_close_volume(desc); 804 ubi_close_volume(desc);
805 goto out_free; 805 goto out_free;
806 } 806 }
807 807
808 re->remove = 1; 808 re1->remove = 1;
809 re->desc = desc; 809 re1->desc = desc;
810 list_add(&re->list, &rename_list); 810 list_add(&re1->list, &rename_list);
811 dbg_msg("will remove volume %d, name \"%s\"", 811 dbg_msg("will remove volume %d, name \"%s\"",
812 re->desc->vol->vol_id, re->desc->vol->name); 812 re1->desc->vol->vol_id, re1->desc->vol->name);
813 } 813 }
814 814
815 mutex_lock(&ubi->device_mutex); 815 mutex_lock(&ubi->device_mutex);
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 372a15ac9995..69b52e9c9489 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -843,7 +843,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
843 case UBI_COMPAT_DELETE: 843 case UBI_COMPAT_DELETE:
844 ubi_msg("\"delete\" compatible internal volume %d:%d" 844 ubi_msg("\"delete\" compatible internal volume %d:%d"
845 " found, will remove it", vol_id, lnum); 845 " found, will remove it", vol_id, lnum);
846 err = add_to_list(si, pnum, ec, &si->corr); 846 err = add_to_list(si, pnum, ec, &si->erase);
847 if (err) 847 if (err)
848 return err; 848 return err;
849 return 0; 849 return 0;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index ee7b1d8fbb92..97a435672eaf 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1212,7 +1212,8 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1212retry: 1212retry:
1213 spin_lock(&ubi->wl_lock); 1213 spin_lock(&ubi->wl_lock);
1214 e = ubi->lookuptbl[pnum]; 1214 e = ubi->lookuptbl[pnum];
1215 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) { 1215 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1216 in_wl_tree(e, &ubi->erroneous)) {
1216 spin_unlock(&ubi->wl_lock); 1217 spin_unlock(&ubi->wl_lock);
1217 return 0; 1218 return 0;
1218 } 1219 }
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index c754d88e5ec9..179871d9e71f 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -633,7 +633,11 @@ struct vortex_private {
633 open:1, 633 open:1,
634 medialock:1, 634 medialock:1,
635 must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ 635 must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
636 large_frames:1; /* accept large frames */ 636 large_frames:1, /* accept large frames */
637 handling_irq:1; /* private in_irq indicator */
638 /* {get|set}_wol operations are already serialized by rtnl.
639 * no additional locking is required for the enable_wol and acpi_set_WOL()
640 */
637 int drv_flags; 641 int drv_flags;
638 u16 status_enable; 642 u16 status_enable;
639 u16 intr_enable; 643 u16 intr_enable;
@@ -646,7 +650,7 @@ struct vortex_private {
646 u16 io_size; /* Size of PCI region (for release_region) */ 650 u16 io_size; /* Size of PCI region (for release_region) */
647 651
648 /* Serialises access to hardware other than MII and variables below. 652 /* Serialises access to hardware other than MII and variables below.
649 * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */ 653 * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
650 spinlock_t lock; 654 spinlock_t lock;
651 655
652 spinlock_t mii_lock; /* Serialises access to MII */ 656 spinlock_t mii_lock; /* Serialises access to MII */
@@ -1993,10 +1997,9 @@ vortex_error(struct net_device *dev, int status)
1993 } 1997 }
1994 } 1998 }
1995 1999
1996 if (status & RxEarly) { /* Rx early is unused. */ 2000 if (status & RxEarly) /* Rx early is unused. */
1997 vortex_rx(dev);
1998 iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD); 2001 iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
1999 } 2002
2000 if (status & StatsFull) { /* Empty statistics. */ 2003 if (status & StatsFull) { /* Empty statistics. */
2001 static int DoneDidThat; 2004 static int DoneDidThat;
2002 if (vortex_debug > 4) 2005 if (vortex_debug > 4)
@@ -2133,6 +2136,15 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2133 dev->name, vp->cur_tx); 2136 dev->name, vp->cur_tx);
2134 } 2137 }
2135 2138
2139 /*
2140 * We can't allow a recursion from our interrupt handler back into the
2141 * tx routine, as they take the same spin lock, and that causes
2142 * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
2143 * a bit
2144 */
2145 if (vp->handling_irq)
2146 return NETDEV_TX_BUSY;
2147
2136 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) { 2148 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
2137 if (vortex_debug > 0) 2149 if (vortex_debug > 0)
2138 pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n", 2150 pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
@@ -2288,7 +2300,12 @@ vortex_interrupt(int irq, void *dev_id)
2288 if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { 2300 if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
2289 if (status == 0xffff) 2301 if (status == 0xffff)
2290 break; 2302 break;
2303 if (status & RxEarly)
2304 vortex_rx(dev);
2305 spin_unlock(&vp->window_lock);
2291 vortex_error(dev, status); 2306 vortex_error(dev, status);
2307 spin_lock(&vp->window_lock);
2308 window_set(vp, 7);
2292 } 2309 }
2293 2310
2294 if (--work_done < 0) { 2311 if (--work_done < 0) {
@@ -2335,11 +2352,13 @@ boomerang_interrupt(int irq, void *dev_id)
2335 2352
2336 ioaddr = vp->ioaddr; 2353 ioaddr = vp->ioaddr;
2337 2354
2355
2338 /* 2356 /*
2339 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout 2357 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
2340 * and boomerang_start_xmit 2358 * and boomerang_start_xmit
2341 */ 2359 */
2342 spin_lock(&vp->lock); 2360 spin_lock(&vp->lock);
2361 vp->handling_irq = 1;
2343 2362
2344 status = ioread16(ioaddr + EL3_STATUS); 2363 status = ioread16(ioaddr + EL3_STATUS);
2345 2364
@@ -2447,6 +2466,7 @@ boomerang_interrupt(int irq, void *dev_id)
2447 pr_debug("%s: exiting interrupt, status %4.4x.\n", 2466 pr_debug("%s: exiting interrupt, status %4.4x.\n",
2448 dev->name, status); 2467 dev->name, status);
2449handler_exit: 2468handler_exit:
2469 vp->handling_irq = 0;
2450 spin_unlock(&vp->lock); 2470 spin_unlock(&vp->lock);
2451 return IRQ_HANDLED; 2471 return IRQ_HANDLED;
2452} 2472}
@@ -2922,28 +2942,31 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2922{ 2942{
2923 struct vortex_private *vp = netdev_priv(dev); 2943 struct vortex_private *vp = netdev_priv(dev);
2924 2944
2925 spin_lock_irq(&vp->lock); 2945 if (!VORTEX_PCI(vp))
2946 return;
2947
2926 wol->supported = WAKE_MAGIC; 2948 wol->supported = WAKE_MAGIC;
2927 2949
2928 wol->wolopts = 0; 2950 wol->wolopts = 0;
2929 if (vp->enable_wol) 2951 if (vp->enable_wol)
2930 wol->wolopts |= WAKE_MAGIC; 2952 wol->wolopts |= WAKE_MAGIC;
2931 spin_unlock_irq(&vp->lock);
2932} 2953}
2933 2954
2934static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2955static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2935{ 2956{
2936 struct vortex_private *vp = netdev_priv(dev); 2957 struct vortex_private *vp = netdev_priv(dev);
2958
2959 if (!VORTEX_PCI(vp))
2960 return -EOPNOTSUPP;
2961
2937 if (wol->wolopts & ~WAKE_MAGIC) 2962 if (wol->wolopts & ~WAKE_MAGIC)
2938 return -EINVAL; 2963 return -EINVAL;
2939 2964
2940 spin_lock_irq(&vp->lock);
2941 if (wol->wolopts & WAKE_MAGIC) 2965 if (wol->wolopts & WAKE_MAGIC)
2942 vp->enable_wol = 1; 2966 vp->enable_wol = 1;
2943 else 2967 else
2944 vp->enable_wol = 0; 2968 vp->enable_wol = 0;
2945 acpi_set_WOL(dev); 2969 acpi_set_WOL(dev);
2946 spin_unlock_irq(&vp->lock);
2947 2970
2948 return 0; 2971 return 0;
2949} 2972}
@@ -2971,7 +2994,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2971{ 2994{
2972 int err; 2995 int err;
2973 struct vortex_private *vp = netdev_priv(dev); 2996 struct vortex_private *vp = netdev_priv(dev);
2974 unsigned long flags;
2975 pci_power_t state = 0; 2997 pci_power_t state = 0;
2976 2998
2977 if(VORTEX_PCI(vp)) 2999 if(VORTEX_PCI(vp))
@@ -2981,9 +3003,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2981 3003
2982 if(state != 0) 3004 if(state != 0)
2983 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); 3005 pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
2984 spin_lock_irqsave(&vp->lock, flags);
2985 err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); 3006 err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
2986 spin_unlock_irqrestore(&vp->lock, flags);
2987 if(state != 0) 3007 if(state != 0)
2988 pci_set_power_state(VORTEX_PCI(vp), state); 3008 pci_set_power_state(VORTEX_PCI(vp), state);
2989 3009
@@ -3188,6 +3208,9 @@ static void acpi_set_WOL(struct net_device *dev)
3188 return; 3208 return;
3189 } 3209 }
3190 3210
3211 if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
3212 return;
3213
3191 /* Change the power state to D3; RxEnable doesn't take effect. */ 3214 /* Change the power state to D3; RxEnable doesn't take effect. */
3192 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); 3215 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
3193 } 3216 }
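The 3c59x changes above close a lock-recursion deadlock: boomerang_interrupt() sets vp->handling_irq while it holds vp->lock, and boomerang_start_xmit() now backs off with NETDEV_TX_BUSY instead of trying to take the same lock again; vortex_interrupt() likewise drops window_lock around the vortex_error() call and restores window 7 afterwards. A minimal sketch of the guard pattern, with hypothetical example_* names standing in for the driver's own structures:

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_priv {			/* hypothetical stand-in for vortex_private */
	spinlock_t lock;		/* taken by both the ISR and the xmit path */
	int handling_irq;		/* non-zero while the ISR owns the lock */
};

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* The ISR may end up back here while already holding priv->lock;
	 * taking it again would deadlock, so defer and let the stack retry. */
	if (priv->handling_irq)
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&priv->lock, flags);
	/* ... place the skb on the hardware ring ... */
	spin_unlock_irqrestore(&priv->lock, flags);
	return NETDEV_TX_OK;
}

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct example_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	priv->handling_irq = 1;
	/* ... process completions; may call code that tries to transmit ... */
	priv->handling_irq = 0;
	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}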
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 5a6895320b48..5db667c0b371 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -928,6 +928,16 @@ config SMC91X
928 The module will be called smc91x. If you want to compile it as a 928 The module will be called smc91x. If you want to compile it as a
929 module, say M here and read <file:Documentation/kbuild/modules.txt>. 929 module, say M here and read <file:Documentation/kbuild/modules.txt>.
930 930
931config PXA168_ETH
932 tristate "Marvell pxa168 ethernet support"
933 depends on CPU_PXA168
934 select PHYLIB
935 help
936 This driver supports the pxa168 Ethernet ports.
937
938 To compile this driver as a module, choose M here. The module
939 will be called pxa168_eth.
940
931config NET_NETX 941config NET_NETX
932 tristate "NetX Ethernet support" 942 tristate "NetX Ethernet support"
933 select MII 943 select MII
@@ -2418,7 +2428,7 @@ config UGETH_TX_ON_DEMAND
2418 2428
2419config MV643XX_ETH 2429config MV643XX_ETH
2420 tristate "Marvell Discovery (643XX) and Orion ethernet support" 2430 tristate "Marvell Discovery (643XX) and Orion ethernet support"
2421 depends on MV64X60 || PPC32 || PLAT_ORION 2431 depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
2422 select INET_LRO 2432 select INET_LRO
2423 select PHYLIB 2433 select PHYLIB
2424 help 2434 help
@@ -2793,7 +2803,7 @@ config NIU
2793 2803
2794config PASEMI_MAC 2804config PASEMI_MAC
2795 tristate "PA Semi 1/10Gbit MAC" 2805 tristate "PA Semi 1/10Gbit MAC"
2796 depends on PPC_PASEMI && PCI 2806 depends on PPC_PASEMI && PCI && INET
2797 select PHYLIB 2807 select PHYLIB
2798 select INET_LRO 2808 select INET_LRO
2799 help 2809 help
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 56e8c27f77ce..3e8f150c4b14 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -244,6 +244,7 @@ obj-$(CONFIG_MYRI10GE) += myri10ge/
244obj-$(CONFIG_SMC91X) += smc91x.o 244obj-$(CONFIG_SMC91X) += smc91x.o
245obj-$(CONFIG_SMC911X) += smc911x.o 245obj-$(CONFIG_SMC911X) += smc911x.o
246obj-$(CONFIG_SMSC911X) += smsc911x.o 246obj-$(CONFIG_SMSC911X) += smsc911x.o
247obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
247obj-$(CONFIG_BFIN_MAC) += bfin_mac.o 248obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
248obj-$(CONFIG_DM9000) += dm9000.o 249obj-$(CONFIG_DM9000) += dm9000.o
249obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o 250obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 63b9ba0cc67e..c73be2848319 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1251,6 +1251,12 @@ static void atl1_free_ring_resources(struct atl1_adapter *adapter)
1251 1251
1252 rrd_ring->desc = NULL; 1252 rrd_ring->desc = NULL;
1253 rrd_ring->dma = 0; 1253 rrd_ring->dma = 0;
1254
1255 adapter->cmb.dma = 0;
1256 adapter->cmb.cmb = NULL;
1257
1258 adapter->smb.dma = 0;
1259 adapter->smb.smb = NULL;
1254} 1260}
1255 1261
1256static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) 1262static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
@@ -2847,10 +2853,11 @@ static int atl1_resume(struct pci_dev *pdev)
2847 pci_enable_wake(pdev, PCI_D3cold, 0); 2853 pci_enable_wake(pdev, PCI_D3cold, 0);
2848 2854
2849 atl1_reset_hw(&adapter->hw); 2855 atl1_reset_hw(&adapter->hw);
2850 adapter->cmb.cmb->int_stats = 0;
2851 2856
2852 if (netif_running(netdev)) 2857 if (netif_running(netdev)) {
2858 adapter->cmb.cmb->int_stats = 0;
2853 atl1_up(adapter); 2859 atl1_up(adapter);
2860 }
2854 netif_device_attach(netdev); 2861 netif_device_attach(netdev);
2855 2862
2856 return 0; 2863 return 0;
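In the atl1 hunks above, the coherent memory block pointers are cleared together with the ring resources, and the resume path now touches cmb->int_stats only when the interface is actually running, so a suspend/resume cycle with the interface down no longer dereferences a freed or never-allocated block. A rough sketch of the resume-side check; the example_* names are hypothetical stand-ins for the driver's private structures:

#include <linux/types.h>
#include <linux/netdevice.h>

struct example_cmb { u32 int_stats; };

struct example_adapter {
	struct net_device *netdev;
	struct example_cmb *cmb;	/* coherent block, valid only while rings exist */
};

static int example_resume(struct example_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* ... restore PCI state, reset the MAC ... */

	if (netif_running(netdev)) {
		adapter->cmb->int_stats = 0;	/* block exists only when the interface is up */
		/* ... bring the rings back up ... */
	}
	netif_device_attach(netdev);
	return 0;
}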
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 37617abc1647..efeffdf9e5fa 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -848,6 +848,15 @@ static int b44_poll(struct napi_struct *napi, int budget)
848 b44_tx(bp); 848 b44_tx(bp);
849 /* spin_unlock(&bp->tx_lock); */ 849 /* spin_unlock(&bp->tx_lock); */
850 } 850 }
851 if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
852 bp->istat &= ~ISTAT_RFO;
853 b44_disable_ints(bp);
854 ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
855 b44_init_rings(bp);
856 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
857 netif_wake_queue(bp->dev);
858 }
859
851 spin_unlock_irqrestore(&bp->lock, flags); 860 spin_unlock_irqrestore(&bp->lock, flags);
852 861
853 work_done = 0; 862 work_done = 0;
@@ -2161,8 +2170,6 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2161 dev->irq = sdev->irq; 2170 dev->irq = sdev->irq;
2162 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); 2171 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2163 2172
2164 netif_carrier_off(dev);
2165
2166 err = ssb_bus_powerup(sdev->bus, 0); 2173 err = ssb_bus_powerup(sdev->bus, 0);
2167 if (err) { 2174 if (err) {
2168 dev_err(sdev->dev, 2175 dev_err(sdev->dev,
@@ -2204,6 +2211,8 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2204 goto err_out_powerdown; 2211 goto err_out_powerdown;
2205 } 2212 }
2206 2213
2214 netif_carrier_off(dev);
2215
2207 ssb_set_drvdata(sdev, dev); 2216 ssb_set_drvdata(sdev, dev);
2208 2217
2209 /* Chip reset provides power to the b44 MAC & PCI cores, which 2218 /* Chip reset provides power to the b44 MAC & PCI cores, which
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 99197bd54da5..53306bf3f401 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -181,6 +181,7 @@ struct be_drvr_stats {
181 u64 be_rx_bytes_prev; 181 u64 be_rx_bytes_prev;
182 u64 be_rx_pkts; 182 u64 be_rx_pkts;
183 u32 be_rx_rate; 183 u32 be_rx_rate;
184 u32 be_rx_mcast_pkt;
184 /* number of non ether type II frames dropped where 185 /* number of non ether type II frames dropped where
185 * frame len > length field of Mac Hdr */ 186 * frame len > length field of Mac Hdr */
186 u32 be_802_3_dropped_frames; 187 u32 be_802_3_dropped_frames;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3d305494a606..34abcc9403d6 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -140,10 +140,8 @@ int be_process_mcc(struct be_adapter *adapter, int *status)
140 while ((compl = be_mcc_compl_get(adapter))) { 140 while ((compl = be_mcc_compl_get(adapter))) {
141 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 141 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
142 /* Interpret flags as an async trailer */ 142 /* Interpret flags as an async trailer */
143 BUG_ON(!is_link_state_evt(compl->flags)); 143 if (is_link_state_evt(compl->flags))
144 144 be_async_link_state_process(adapter,
145 /* Interpret compl as a async link evt */
146 be_async_link_state_process(adapter,
147 (struct be_async_event_link_state *) compl); 145 (struct be_async_event_link_state *) compl);
148 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 146 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
149 *status = be_mcc_compl_process(adapter, compl); 147 *status = be_mcc_compl_process(adapter, compl);
@@ -207,7 +205,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
207 205
208 if (msecs > 4000) { 206 if (msecs > 4000) {
209 dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); 207 dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
210 be_dump_ue(adapter); 208 be_detect_dump_ue(adapter);
211 return -1; 209 return -1;
212 } 210 }
213 211
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index bdc10a28cfda..ad1e6fac60c5 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -992,5 +992,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
992extern int be_cmd_get_phy_info(struct be_adapter *adapter, 992extern int be_cmd_get_phy_info(struct be_adapter *adapter,
993 struct be_dma_mem *cmd); 993 struct be_dma_mem *cmd);
994extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); 994extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
995extern void be_dump_ue(struct be_adapter *adapter); 995extern void be_detect_dump_ue(struct be_adapter *adapter);
996 996
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index cd16243c7c36..13f0abbc5205 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -60,6 +60,7 @@ static const struct be_ethtool_stat et_stats[] = {
60 {DRVSTAT_INFO(be_rx_events)}, 60 {DRVSTAT_INFO(be_rx_events)},
61 {DRVSTAT_INFO(be_tx_compl)}, 61 {DRVSTAT_INFO(be_tx_compl)},
62 {DRVSTAT_INFO(be_rx_compl)}, 62 {DRVSTAT_INFO(be_rx_compl)},
63 {DRVSTAT_INFO(be_rx_mcast_pkt)},
63 {DRVSTAT_INFO(be_ethrx_post_fail)}, 64 {DRVSTAT_INFO(be_ethrx_post_fail)},
64 {DRVSTAT_INFO(be_802_3_dropped_frames)}, 65 {DRVSTAT_INFO(be_802_3_dropped_frames)},
65 {DRVSTAT_INFO(be_802_3_malformed_frames)}, 66 {DRVSTAT_INFO(be_802_3_malformed_frames)},
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 5d38046402b2..a2ec5df0d733 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -167,8 +167,11 @@
167#define FLASH_FCoE_BIOS_START_g3 (13631488) 167#define FLASH_FCoE_BIOS_START_g3 (13631488)
168#define FLASH_REDBOOT_START_g3 (262144) 168#define FLASH_REDBOOT_START_g3 (262144)
169 169
170 170/************* Rx Packet Type Encoding **************/
171 171#define BE_UNICAST_PACKET 0
172#define BE_MULTICAST_PACKET 1
173#define BE_BROADCAST_PACKET 2
174#define BE_RSVD_PACKET 3
172 175
173/* 176/*
174 * BE descriptors: host memory data structures whose formats 177 * BE descriptors: host memory data structures whose formats
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 74e146f470c6..6eda7a022256 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -247,6 +247,7 @@ void netdev_stats_update(struct be_adapter *adapter)
247 dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts; 247 dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
248 dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes; 248 dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
249 dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes; 249 dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
250 dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;
250 251
251 /* bad pkts received */ 252 /* bad pkts received */
252 dev_stats->rx_errors = port_stats->rx_crc_errors + 253 dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -294,7 +295,6 @@ void netdev_stats_update(struct be_adapter *adapter)
294 /* no space available in linux */ 295 /* no space available in linux */
295 dev_stats->tx_dropped = 0; 296 dev_stats->tx_dropped = 0;
296 297
297 dev_stats->multicast = port_stats->rx_multicast_frames;
298 dev_stats->collisions = 0; 298 dev_stats->collisions = 0;
299 299
300 /* detailed tx_errors */ 300 /* detailed tx_errors */
@@ -848,7 +848,7 @@ static void be_rx_rate_update(struct be_adapter *adapter)
848} 848}
849 849
850static void be_rx_stats_update(struct be_adapter *adapter, 850static void be_rx_stats_update(struct be_adapter *adapter,
851 u32 pktsize, u16 numfrags) 851 u32 pktsize, u16 numfrags, u8 pkt_type)
852{ 852{
853 struct be_drvr_stats *stats = drvr_stats(adapter); 853 struct be_drvr_stats *stats = drvr_stats(adapter);
854 854
@@ -856,6 +856,9 @@ static void be_rx_stats_update(struct be_adapter *adapter,
856 stats->be_rx_frags += numfrags; 856 stats->be_rx_frags += numfrags;
857 stats->be_rx_bytes += pktsize; 857 stats->be_rx_bytes += pktsize;
858 stats->be_rx_pkts++; 858 stats->be_rx_pkts++;
859
860 if (pkt_type == BE_MULTICAST_PACKET)
861 stats->be_rx_mcast_pkt++;
859} 862}
860 863
861static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) 864static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -925,9 +928,11 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
925 u16 rxq_idx, i, j; 928 u16 rxq_idx, i, j;
926 u32 pktsize, hdr_len, curr_frag_len, size; 929 u32 pktsize, hdr_len, curr_frag_len, size;
927 u8 *start; 930 u8 *start;
931 u8 pkt_type;
928 932
929 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 933 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
930 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 934 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
935 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
931 936
932 page_info = get_rx_page_info(adapter, rxq_idx); 937 page_info = get_rx_page_info(adapter, rxq_idx);
933 938
@@ -993,7 +998,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
993 BUG_ON(j > MAX_SKB_FRAGS); 998 BUG_ON(j > MAX_SKB_FRAGS);
994 999
995done: 1000done:
996 be_rx_stats_update(adapter, pktsize, num_rcvd); 1001 be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
997} 1002}
998 1003
999/* Process the RX completion indicated by rxcp when GRO is disabled */ 1004/* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -1060,6 +1065,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1060 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; 1065 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
1061 u16 i, rxq_idx = 0, vid, j; 1066 u16 i, rxq_idx = 0, vid, j;
1062 u8 vtm; 1067 u8 vtm;
1068 u8 pkt_type;
1063 1069
1064 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 1070 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1065 /* Is it a flush compl that has no data */ 1071 /* Is it a flush compl that has no data */
@@ -1070,6 +1076,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1070 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 1076 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1071 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 1077 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
1072 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); 1078 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1079 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
1073 1080
1074 /* vlanf could be wrongly set in some cards. 1081 /* vlanf could be wrongly set in some cards.
1075 * ignore if vtm is not set */ 1082 * ignore if vtm is not set */
@@ -1125,7 +1132,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1125 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); 1132 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
1126 } 1133 }
1127 1134
1128 be_rx_stats_update(adapter, pkt_size, num_rcvd); 1135 be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
1129} 1136}
1130 1137
1131static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) 1138static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -1743,26 +1750,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1743 return 1; 1750 return 1;
1744} 1751}
1745 1752
1746static inline bool be_detect_ue(struct be_adapter *adapter) 1753void be_detect_dump_ue(struct be_adapter *adapter)
1747{
1748 u32 online0 = 0, online1 = 0;
1749
1750 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
1751
1752 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
1753
1754 if (!online0 || !online1) {
1755 adapter->ue_detected = true;
1756 dev_err(&adapter->pdev->dev,
1757 "UE Detected!! online0=%d online1=%d\n",
1758 online0, online1);
1759 return true;
1760 }
1761
1762 return false;
1763}
1764
1765void be_dump_ue(struct be_adapter *adapter)
1766{ 1754{
1767 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask; 1755 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1768 u32 i; 1756 u32 i;
@@ -1779,6 +1767,11 @@ void be_dump_ue(struct be_adapter *adapter)
1779 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask)); 1767 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1780 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask)); 1768 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1781 1769
1770 if (ue_status_lo || ue_status_hi) {
1771 adapter->ue_detected = true;
1772 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1773 }
1774
1782 if (ue_status_lo) { 1775 if (ue_status_lo) {
1783 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) { 1776 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1784 if (ue_status_lo & 1) 1777 if (ue_status_lo & 1)
@@ -1814,10 +1807,8 @@ static void be_worker(struct work_struct *work)
1814 adapter->rx_post_starved = false; 1807 adapter->rx_post_starved = false;
1815 be_post_rx_frags(adapter); 1808 be_post_rx_frags(adapter);
1816 } 1809 }
1817 if (!adapter->ue_detected) { 1810 if (!adapter->ue_detected)
1818 if (be_detect_ue(adapter)) 1811 be_detect_dump_ue(adapter);
1819 be_dump_ue(adapter);
1820 }
1821 1812
1822 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 1813 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1823} 1814}
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 53af9c93e75c..0c2d96ed561c 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
20 * (you will need to reboot afterwards) */ 20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */ 21/* #define BNX2X_STOP_ON_ERROR */
22 22
23#define DRV_MODULE_VERSION "1.52.53-3" 23#define DRV_MODULE_VERSION "1.52.53-4"
24#define DRV_MODULE_RELDATE "2010/18/04" 24#define DRV_MODULE_RELDATE "2010/16/08"
25#define BNX2X_BC_VER 0x040200 25#define BNX2X_BC_VER 0x040200
26 26
27#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 27#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index b4ec2b02a465..f8c3f08e4ce7 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -4328,10 +4328,12 @@ static int bnx2x_init_port(struct bnx2x *bp)
4328 val |= aeu_gpio_mask; 4328 val |= aeu_gpio_mask;
4329 REG_WR(bp, offset, val); 4329 REG_WR(bp, offset, val);
4330 } 4330 }
4331 bp->port.need_hw_lock = 1;
4331 break; 4332 break;
4332 4333
4333 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4334 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 4334 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4335 bp->port.need_hw_lock = 1;
4336 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4335 /* add SPIO 5 to group 0 */ 4337 /* add SPIO 5 to group 0 */
4336 { 4338 {
4337 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4339 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4341,7 +4343,10 @@ static int bnx2x_init_port(struct bnx2x *bp)
4341 REG_WR(bp, reg_addr, val); 4343 REG_WR(bp, reg_addr, val);
4342 } 4344 }
4343 break; 4345 break;
4344 4346 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4347 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4348 bp->port.need_hw_lock = 1;
4349 break;
4345 default: 4350 default:
4346 break; 4351 break;
4347 } 4352 }
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 822f586d72af..0ddf4c66afe2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2466,6 +2466,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2466 if (!(dev->flags & IFF_MASTER)) 2466 if (!(dev->flags & IFF_MASTER))
2467 goto out; 2467 goto out;
2468 2468
2469 if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
2470 goto out;
2471
2469 read_lock(&bond->lock); 2472 read_lock(&bond->lock);
2470 slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev), 2473 slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
2471 orig_dev); 2474 orig_dev);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c746b331771d..26bb118c4533 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -362,6 +362,9 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
362 goto out; 362 goto out;
363 } 363 }
364 364
365 if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
366 goto out;
367
365 if (skb->len < sizeof(struct arp_pkt)) { 368 if (skb->len < sizeof(struct arp_pkt)) {
366 pr_debug("Packet is too small to be an ARP\n"); 369 pr_debug("Packet is too small to be an ARP\n");
367 goto out; 370 goto out;
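Both bonding receive handlers above (LACPDU processing in bond_3ad and ARP snooping in bond_alb) gain a pskb_may_pull() check so the protocol header is guaranteed to sit in the skb's linear area before it is parsed; undersized or heavily fragmented frames are simply skipped instead of being read past the headlen. A minimal sketch of the pattern for an ARP handler, with a hypothetical example_ helper name:

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>

/* Returns true when the frame carries a complete, plausible ARP header. */
static bool example_arp_ok(struct sk_buff *skb, struct net_device *dev)
{
	const struct arphdr *arp;

	/* Make sure the whole header is present in linear data;
	 * pskb_may_pull() pulls it in from fragments if necessary. */
	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
		return false;

	arp = arp_hdr(skb);		/* safe to dereference now */
	return arp->ar_op == htons(ARPOP_REQUEST) ||
	       arp->ar_op == htons(ARPOP_REPLY);
}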
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2cc4cfc31892..e953c6ad6e6d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2797,9 +2797,15 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2797 * so it can wait 2797 * so it can wait
2798 */ 2798 */
2799 bond_for_each_slave(bond, slave, i) { 2799 bond_for_each_slave(bond, slave, i) {
2800 unsigned long trans_start = dev_trans_start(slave->dev);
2801
2800 if (slave->link != BOND_LINK_UP) { 2802 if (slave->link != BOND_LINK_UP) {
2801 if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) && 2803 if (time_in_range(jiffies,
2802 time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) { 2804 trans_start - delta_in_ticks,
2805 trans_start + delta_in_ticks) &&
2806 time_in_range(jiffies,
2807 slave->dev->last_rx - delta_in_ticks,
2808 slave->dev->last_rx + delta_in_ticks)) {
2803 2809
2804 slave->link = BOND_LINK_UP; 2810 slave->link = BOND_LINK_UP;
2805 slave->state = BOND_STATE_ACTIVE; 2811 slave->state = BOND_STATE_ACTIVE;
@@ -2827,8 +2833,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2827 * when the source ip is 0, so don't take the link down 2833 * when the source ip is 0, so don't take the link down
2828 * if we don't know our ip yet 2834 * if we don't know our ip yet
2829 */ 2835 */
2830 if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) || 2836 if (!time_in_range(jiffies,
2831 (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) { 2837 trans_start - delta_in_ticks,
2838 trans_start + 2 * delta_in_ticks) ||
2839 !time_in_range(jiffies,
2840 slave->dev->last_rx - delta_in_ticks,
2841 slave->dev->last_rx + 2 * delta_in_ticks)) {
2832 2842
2833 slave->link = BOND_LINK_DOWN; 2843 slave->link = BOND_LINK_DOWN;
2834 slave->state = BOND_STATE_BACKUP; 2844 slave->state = BOND_STATE_BACKUP;
@@ -2883,13 +2893,16 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2883{ 2893{
2884 struct slave *slave; 2894 struct slave *slave;
2885 int i, commit = 0; 2895 int i, commit = 0;
2896 unsigned long trans_start;
2886 2897
2887 bond_for_each_slave(bond, slave, i) { 2898 bond_for_each_slave(bond, slave, i) {
2888 slave->new_link = BOND_LINK_NOCHANGE; 2899 slave->new_link = BOND_LINK_NOCHANGE;
2889 2900
2890 if (slave->link != BOND_LINK_UP) { 2901 if (slave->link != BOND_LINK_UP) {
2891 if (time_before_eq(jiffies, slave_last_rx(bond, slave) + 2902 if (time_in_range(jiffies,
2892 delta_in_ticks)) { 2903 slave_last_rx(bond, slave) - delta_in_ticks,
2904 slave_last_rx(bond, slave) + delta_in_ticks)) {
2905
2893 slave->new_link = BOND_LINK_UP; 2906 slave->new_link = BOND_LINK_UP;
2894 commit++; 2907 commit++;
2895 } 2908 }
@@ -2902,8 +2915,9 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2902 * active. This avoids bouncing, as the last receive 2915 * active. This avoids bouncing, as the last receive
2903 * times need a full ARP monitor cycle to be updated. 2916 * times need a full ARP monitor cycle to be updated.
2904 */ 2917 */
2905 if (!time_after_eq(jiffies, slave->jiffies + 2918 if (time_in_range(jiffies,
2906 2 * delta_in_ticks)) 2919 slave->jiffies - delta_in_ticks,
2920 slave->jiffies + 2 * delta_in_ticks))
2907 continue; 2921 continue;
2908 2922
2909 /* 2923 /*
@@ -2921,8 +2935,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2921 */ 2935 */
2922 if (slave->state == BOND_STATE_BACKUP && 2936 if (slave->state == BOND_STATE_BACKUP &&
2923 !bond->current_arp_slave && 2937 !bond->current_arp_slave &&
2924 time_after(jiffies, slave_last_rx(bond, slave) + 2938 !time_in_range(jiffies,
2925 3 * delta_in_ticks)) { 2939 slave_last_rx(bond, slave) - delta_in_ticks,
2940 slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
2941
2926 slave->new_link = BOND_LINK_DOWN; 2942 slave->new_link = BOND_LINK_DOWN;
2927 commit++; 2943 commit++;
2928 } 2944 }
@@ -2933,11 +2949,15 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2933 * - (more than 2*delta since receive AND 2949 * - (more than 2*delta since receive AND
2934 * the bond has an IP address) 2950 * the bond has an IP address)
2935 */ 2951 */
2952 trans_start = dev_trans_start(slave->dev);
2936 if ((slave->state == BOND_STATE_ACTIVE) && 2953 if ((slave->state == BOND_STATE_ACTIVE) &&
2937 (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2954 (!time_in_range(jiffies,
2938 2 * delta_in_ticks) || 2955 trans_start - delta_in_ticks,
2939 (time_after_eq(jiffies, slave_last_rx(bond, slave) 2956 trans_start + 2 * delta_in_ticks) ||
2940 + 2 * delta_in_ticks)))) { 2957 !time_in_range(jiffies,
2958 slave_last_rx(bond, slave) - delta_in_ticks,
2959 slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
2960
2941 slave->new_link = BOND_LINK_DOWN; 2961 slave->new_link = BOND_LINK_DOWN;
2942 commit++; 2962 commit++;
2943 } 2963 }
@@ -2956,6 +2976,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
2956{ 2976{
2957 struct slave *slave; 2977 struct slave *slave;
2958 int i; 2978 int i;
2979 unsigned long trans_start;
2959 2980
2960 bond_for_each_slave(bond, slave, i) { 2981 bond_for_each_slave(bond, slave, i) {
2961 switch (slave->new_link) { 2982 switch (slave->new_link) {
@@ -2963,10 +2984,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
2963 continue; 2984 continue;
2964 2985
2965 case BOND_LINK_UP: 2986 case BOND_LINK_UP:
2987 trans_start = dev_trans_start(slave->dev);
2966 if ((!bond->curr_active_slave && 2988 if ((!bond->curr_active_slave &&
2967 time_before_eq(jiffies, 2989 time_in_range(jiffies,
2968 dev_trans_start(slave->dev) + 2990 trans_start - delta_in_ticks,
2969 delta_in_ticks)) || 2991 trans_start + delta_in_ticks)) ||
2970 bond->curr_active_slave != slave) { 2992 bond->curr_active_slave != slave) {
2971 slave->link = BOND_LINK_UP; 2993 slave->link = BOND_LINK_UP;
2972 bond->current_arp_slave = NULL; 2994 bond->current_arp_slave = NULL;
@@ -5142,6 +5164,15 @@ int bond_create(struct net *net, const char *name)
5142 res = dev_alloc_name(bond_dev, "bond%d"); 5164 res = dev_alloc_name(bond_dev, "bond%d");
5143 if (res < 0) 5165 if (res < 0)
5144 goto out; 5166 goto out;
5167 } else {
5168 /*
5169 * If we're given a name to register
5170 * we need to ensure that its not already
5171 * registered
5172 */
5173 res = -EEXIST;
5174 if (__dev_get_by_name(net, name) != NULL)
5175 goto out;
5145 } 5176 }
5146 5177
5147 res = register_netdevice(bond_dev); 5178 res = register_netdevice(bond_dev);
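The ARP-monitor hunks above replace open-coded time_before_eq()/time_after_eq() comparisons with time_in_range(), so a slave's trans_start or last_rx timestamp counts as recent only while jiffies lies inside a bounded window around it; a very stale timestamp, for instance one left over from before a long idle period or a jiffies wrap, can no longer satisfy the check by accident. The bond_create() hunk separately rejects an explicitly requested name that is already registered. A small sketch of the window test, assuming a timestamp recorded in jiffies:

#include <linux/types.h>
#include <linux/jiffies.h>

/*
 * True while jiffies lies within +/- delta ticks of the recorded
 * timestamp. time_in_range(a, b, c) is wrap-safe shorthand for
 * time_after_eq(a, b) && time_before_eq(a, c).
 */
static bool example_recently_active(unsigned long timestamp, unsigned long delta)
{
	return time_in_range(jiffies, timestamp - delta, timestamp + delta);
}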
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 631a6242b011..75bfc3a9d95f 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -15,7 +15,7 @@ config CAIF_TTY
15 15
16config CAIF_SPI_SLAVE 16config CAIF_SPI_SLAVE
17 tristate "CAIF SPI transport driver for slave interface" 17 tristate "CAIF SPI transport driver for slave interface"
18 depends on CAIF 18 depends on CAIF && HAS_DMA
19 default n 19 default n
20 ---help--- 20 ---help---
21 The CAIF Link layer SPI Protocol driver for Slave SPI interface. 21 The CAIF Link layer SPI Protocol driver for Slave SPI interface.
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index ad19585d960b..f208712c0b90 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2296,6 +2296,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2296 case CHELSIO_GET_QSET_NUM:{ 2296 case CHELSIO_GET_QSET_NUM:{
2297 struct ch_reg edata; 2297 struct ch_reg edata;
2298 2298
2299 memset(&edata, 0, sizeof(struct ch_reg));
2300
2299 edata.cmd = CHELSIO_GET_QSET_NUM; 2301 edata.cmd = CHELSIO_GET_QSET_NUM;
2300 edata.val = pi->nqsets; 2302 edata.val = pi->nqsets;
2301 if (copy_to_user(useraddr, &edata, sizeof(edata))) 2303 if (copy_to_user(useraddr, &edata, sizeof(edata)))
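The cxgb3 hunk above zeroes the local struct ch_reg before filling in the fields it actually reports, so the copy_to_user() no longer leaks uninitialised kernel stack bytes (padding or untouched members) to user space. A generic sketch of the pattern, using a hypothetical reply structure:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct example_reply {		/* hypothetical ioctl reply layout */
	u32 cmd;
	u32 addr;
	u32 val;
};

static int example_report(void __user *useraddr, u32 cmd, u32 val)
{
	struct example_reply reply;

	memset(&reply, 0, sizeof(reply));	/* no stale stack data escapes */
	reply.cmd = cmd;
	reply.val = val;

	if (copy_to_user(useraddr, &reply, sizeof(reply)))
		return -EFAULT;
	return 0;
}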
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index a4a0d2b6eb1c..d3d4a57e2450 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -936,12 +936,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
936 ew32(IMC, 0xffffffff); 936 ew32(IMC, 0xffffffff);
937 icr = er32(ICR); 937 icr = er32(ICR);
938 938
939 /* Install any alternate MAC address into RAR0 */ 939 if (hw->mac.type == e1000_82571) {
940 ret_val = e1000_check_alt_mac_addr_generic(hw); 940 /* Install any alternate MAC address into RAR0 */
941 if (ret_val) 941 ret_val = e1000_check_alt_mac_addr_generic(hw);
942 return ret_val; 942 if (ret_val)
943 return ret_val;
943 944
944 e1000e_set_laa_state_82571(hw, true); 945 e1000e_set_laa_state_82571(hw, true);
946 }
945 947
946 /* Reinitialize the 82571 serdes link state machine */ 948 /* Reinitialize the 82571 serdes link state machine */
947 if (hw->phy.media_type == e1000_media_type_internal_serdes) 949 if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1618,14 +1620,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
1618{ 1620{
1619 s32 ret_val = 0; 1621 s32 ret_val = 0;
1620 1622
1621 /* 1623 if (hw->mac.type == e1000_82571) {
1622 * If there's an alternate MAC address place it in RAR0 1624 /*
1623 * so that it will override the Si installed default perm 1625 * If there's an alternate MAC address place it in RAR0
1624 * address. 1626 * so that it will override the Si installed default perm
1625 */ 1627 * address.
1626 ret_val = e1000_check_alt_mac_addr_generic(hw); 1628 */
1627 if (ret_val) 1629 ret_val = e1000_check_alt_mac_addr_generic(hw);
1628 goto out; 1630 if (ret_val)
1631 goto out;
1632 }
1629 1633
1630 ret_val = e1000_read_mac_addr_generic(hw); 1634 ret_val = e1000_read_mac_addr_generic(hw);
1631 1635
@@ -1833,6 +1837,7 @@ struct e1000_info e1000_82573_info = {
1833 | FLAG_HAS_SMART_POWER_DOWN 1837 | FLAG_HAS_SMART_POWER_DOWN
1834 | FLAG_HAS_AMT 1838 | FLAG_HAS_AMT
1835 | FLAG_HAS_SWSM_ON_LOAD, 1839 | FLAG_HAS_SWSM_ON_LOAD,
1840 .flags2 = FLAG2_DISABLE_ASPM_L1,
1836 .pba = 20, 1841 .pba = 20,
1837 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 1842 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
1838 .get_variants = e1000_get_variants_82571, 1843 .get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 307a72f483ee..93b3bedae8d2 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -621,6 +621,7 @@
621#define E1000_FLASH_UPDATES 2000 621#define E1000_FLASH_UPDATES 2000
622 622
623/* NVM Word Offsets */ 623/* NVM Word Offsets */
624#define NVM_COMPAT 0x0003
624#define NVM_ID_LED_SETTINGS 0x0004 625#define NVM_ID_LED_SETTINGS 0x0004
625#define NVM_INIT_CONTROL2_REG 0x000F 626#define NVM_INIT_CONTROL2_REG 0x000F
626#define NVM_INIT_CONTROL3_PORT_B 0x0014 627#define NVM_INIT_CONTROL3_PORT_B 0x0014
@@ -643,6 +644,9 @@
643/* Mask bits for fields in Word 0x1a of the NVM */ 644/* Mask bits for fields in Word 0x1a of the NVM */
644#define NVM_WORD1A_ASPM_MASK 0x000C 645#define NVM_WORD1A_ASPM_MASK 0x000C
645 646
647/* Mask bits for fields in Word 0x03 of the EEPROM */
648#define NVM_COMPAT_LOM 0x0800
649
646/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ 650/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
647#define NVM_SUM 0xBABA 651#define NVM_SUM 0xBABA
648 652
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 66ed08f726fb..ba302a5c2c30 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -57,6 +57,7 @@ enum e1e_registers {
57 E1000_SCTL = 0x00024, /* SerDes Control - RW */ 57 E1000_SCTL = 0x00024, /* SerDes Control - RW */
58 E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ 58 E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
59 E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ 59 E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
60 E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
60 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ 61 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
61 E1000_FCT = 0x00030, /* Flow Control Type - RW */ 62 E1000_FCT = 0x00030, /* Flow Control Type - RW */
62 E1000_VET = 0x00038, /* VLAN Ether Type - RW */ 63 E1000_VET = 0x00038, /* VLAN Ether Type - RW */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 63930d12711c..57b5435599ab 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -105,6 +105,10 @@
105#define E1000_FEXTNVM_SW_CONFIG 1 105#define E1000_FEXTNVM_SW_CONFIG 1
106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ 106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
107 107
108#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
109#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
110#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
111
108#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 112#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
109 113
110#define E1000_ICH_RAR_ENTRIES 7 114#define E1000_ICH_RAR_ENTRIES 7
@@ -125,6 +129,7 @@
125 129
126/* SMBus Address Phy Register */ 130/* SMBus Address Phy Register */
127#define HV_SMB_ADDR PHY_REG(768, 26) 131#define HV_SMB_ADDR PHY_REG(768, 26)
132#define HV_SMB_ADDR_MASK 0x007F
128#define HV_SMB_ADDR_PEC_EN 0x0200 133#define HV_SMB_ADDR_PEC_EN 0x0200
129#define HV_SMB_ADDR_VALID 0x0080 134#define HV_SMB_ADDR_VALID 0x0080
130 135
@@ -237,6 +242,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
237static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 242static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
238static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 243static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
239static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 244static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
245static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
246static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
240 247
241static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 248static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
242{ 249{
@@ -272,7 +279,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
272static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) 279static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
273{ 280{
274 struct e1000_phy_info *phy = &hw->phy; 281 struct e1000_phy_info *phy = &hw->phy;
275 u32 ctrl; 282 u32 ctrl, fwsm;
276 s32 ret_val = 0; 283 s32 ret_val = 0;
277 284
278 phy->addr = 1; 285 phy->addr = 1;
@@ -294,7 +301,8 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
294 * disabled, then toggle the LANPHYPC Value bit to force 301 * disabled, then toggle the LANPHYPC Value bit to force
295 * the interconnect to PCIe mode. 302 * the interconnect to PCIe mode.
296 */ 303 */
297 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 304 fwsm = er32(FWSM);
305 if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
298 ctrl = er32(CTRL); 306 ctrl = er32(CTRL);
299 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 307 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
300 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; 308 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -303,6 +311,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
303 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 311 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
304 ew32(CTRL, ctrl); 312 ew32(CTRL, ctrl);
305 msleep(50); 313 msleep(50);
314
315 /*
316 * Gate automatic PHY configuration by hardware on
317 * non-managed 82579
318 */
319 if (hw->mac.type == e1000_pch2lan)
320 e1000_gate_hw_phy_config_ich8lan(hw, true);
306 } 321 }
307 322
308 /* 323 /*
@@ -315,6 +330,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
315 if (ret_val) 330 if (ret_val)
316 goto out; 331 goto out;
317 332
333 /* Ungate automatic PHY configuration on non-managed 82579 */
334 if ((hw->mac.type == e1000_pch2lan) &&
335 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
336 msleep(10);
337 e1000_gate_hw_phy_config_ich8lan(hw, false);
338 }
339
318 phy->id = e1000_phy_unknown; 340 phy->id = e1000_phy_unknown;
319 ret_val = e1000e_get_phy_id(hw); 341 ret_val = e1000e_get_phy_id(hw);
320 if (ret_val) 342 if (ret_val)
@@ -561,13 +583,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
561 if (mac->type == e1000_ich8lan) 583 if (mac->type == e1000_ich8lan)
562 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); 584 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
563 585
564 /* Disable PHY configuration by hardware, config by software */ 586 /* Gate automatic PHY configuration by hardware on managed 82579 */
565 if (mac->type == e1000_pch2lan) { 587 if ((mac->type == e1000_pch2lan) &&
566 u32 extcnf_ctrl = er32(EXTCNF_CTRL); 588 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
567 589 e1000_gate_hw_phy_config_ich8lan(hw, true);
568 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
569 ew32(EXTCNF_CTRL, extcnf_ctrl);
570 }
571 590
572 return 0; 591 return 0;
573} 592}
@@ -652,6 +671,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
652 goto out; 671 goto out;
653 } 672 }
654 673
674 if (hw->mac.type == e1000_pch2lan) {
675 ret_val = e1000_k1_workaround_lv(hw);
676 if (ret_val)
677 goto out;
678 }
679
655 /* 680 /*
656 * Check if there was DownShift, must be checked 681 * Check if there was DownShift, must be checked
657 * immediately after link-up 682 * immediately after link-up
@@ -895,6 +920,34 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
895} 920}
896 921
897/** 922/**
923 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
924 * @hw: pointer to the HW structure
925 *
926 * Assumes semaphore already acquired.
927 *
928 **/
929static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
930{
931 u16 phy_data;
932 u32 strap = er32(STRAP);
933 s32 ret_val = 0;
934
935 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
936
937 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
938 if (ret_val)
939 goto out;
940
941 phy_data &= ~HV_SMB_ADDR_MASK;
942 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
943 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
944 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
945
946out:
947 return ret_val;
948}
949
950/**
898 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration 951 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
899 * @hw: pointer to the HW structure 952 * @hw: pointer to the HW structure
900 * 953 *
@@ -903,7 +956,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
903 **/ 956 **/
904static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) 957static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
905{ 958{
906 struct e1000_adapter *adapter = hw->adapter;
907 struct e1000_phy_info *phy = &hw->phy; 959 struct e1000_phy_info *phy = &hw->phy;
908 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; 960 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
909 s32 ret_val = 0; 961 s32 ret_val = 0;
@@ -921,7 +973,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
921 if (phy->type != e1000_phy_igp_3) 973 if (phy->type != e1000_phy_igp_3)
922 return ret_val; 974 return ret_val;
923 975
924 if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) { 976 if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
977 (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
925 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; 978 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
926 break; 979 break;
927 } 980 }
@@ -961,21 +1014,16 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
961 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 1014 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
962 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 1015 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
963 1016
964 if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && 1017 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
965 ((hw->mac.type == e1000_pchlan) || 1018 (hw->mac.type == e1000_pchlan)) ||
966 (hw->mac.type == e1000_pch2lan))) { 1019 (hw->mac.type == e1000_pch2lan)) {
967 /* 1020 /*
968 * HW configures the SMBus address and LEDs when the 1021 * HW configures the SMBus address and LEDs when the
969 * OEM and LCD Write Enable bits are set in the NVM. 1022 * OEM and LCD Write Enable bits are set in the NVM.
970 * When both NVM bits are cleared, SW will configure 1023 * When both NVM bits are cleared, SW will configure
971 * them instead. 1024 * them instead.
972 */ 1025 */
973 data = er32(STRAP); 1026 ret_val = e1000_write_smbus_addr(hw);
974 data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
975 reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
976 reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
977 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
978 reg_data);
979 if (ret_val) 1027 if (ret_val)
980 goto out; 1028 goto out;
981 1029
@@ -1440,10 +1488,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1440 goto out; 1488 goto out;
1441 1489
1442 /* Enable jumbo frame workaround in the PHY */ 1490 /* Enable jumbo frame workaround in the PHY */
1443 e1e_rphy(hw, PHY_REG(769, 20), &data);
1444 ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
1445 if (ret_val)
1446 goto out;
1447 e1e_rphy(hw, PHY_REG(769, 23), &data); 1491 e1e_rphy(hw, PHY_REG(769, 23), &data);
1448 data &= ~(0x7F << 5); 1492 data &= ~(0x7F << 5);
1449 data |= (0x37 << 5); 1493 data |= (0x37 << 5);
@@ -1452,7 +1496,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1452 goto out; 1496 goto out;
1453 e1e_rphy(hw, PHY_REG(769, 16), &data); 1497 e1e_rphy(hw, PHY_REG(769, 16), &data);
1454 data &= ~(1 << 13); 1498 data &= ~(1 << 13);
1455 data |= (1 << 12);
1456 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1499 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1457 if (ret_val) 1500 if (ret_val)
1458 goto out; 1501 goto out;
@@ -1477,7 +1520,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1477 1520
1478 mac_reg = er32(RCTL); 1521 mac_reg = er32(RCTL);
1479 mac_reg &= ~E1000_RCTL_SECRC; 1522 mac_reg &= ~E1000_RCTL_SECRC;
1480 ew32(FFLT_DBG, mac_reg); 1523 ew32(RCTL, mac_reg);
1481 1524
1482 ret_val = e1000e_read_kmrn_reg(hw, 1525 ret_val = e1000e_read_kmrn_reg(hw,
1483 E1000_KMRNCTRLSTA_CTRL_OFFSET, 1526 E1000_KMRNCTRLSTA_CTRL_OFFSET,
@@ -1503,17 +1546,12 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1503 goto out; 1546 goto out;
1504 1547
1505 /* Write PHY register values back to h/w defaults */ 1548 /* Write PHY register values back to h/w defaults */
1506 e1e_rphy(hw, PHY_REG(769, 20), &data);
1507 ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
1508 if (ret_val)
1509 goto out;
1510 e1e_rphy(hw, PHY_REG(769, 23), &data); 1549 e1e_rphy(hw, PHY_REG(769, 23), &data);
1511 data &= ~(0x7F << 5); 1550 data &= ~(0x7F << 5);
1512 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); 1551 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1513 if (ret_val) 1552 if (ret_val)
1514 goto out; 1553 goto out;
1515 e1e_rphy(hw, PHY_REG(769, 16), &data); 1554 e1e_rphy(hw, PHY_REG(769, 16), &data);
1516 data &= ~(1 << 12);
1517 data |= (1 << 13); 1555 data |= (1 << 13);
1518 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1556 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1519 if (ret_val) 1557 if (ret_val)
@@ -1559,6 +1597,69 @@ out:
1559} 1597}
1560 1598
1561/** 1599/**
1600 * e1000_k1_gig_workaround_lv - K1 Si workaround
1600 * e1000_k1_workaround_lv - K1 Si workaround
1601 * @hw: pointer to the HW structure
1602 *
1603 * Workaround to set the K1 beacon duration for 82579 parts
1604 **/
1605static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1606{
1607 s32 ret_val = 0;
1608 u16 status_reg = 0;
1609 u32 mac_reg;
1610
1611 if (hw->mac.type != e1000_pch2lan)
1612 goto out;
1613
1614 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1615 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
1616 if (ret_val)
1617 goto out;
1618
1619 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1620 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1621 mac_reg = er32(FEXTNVM4);
1622 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1623
1624 if (status_reg & HV_M_STATUS_SPEED_1000)
1625 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1626 else
1627 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1628
1629 ew32(FEXTNVM4, mac_reg);
1630 }
1631
1632out:
1633 return ret_val;
1634}
1635
1636/**
1637 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1638 * @hw: pointer to the HW structure
1639 * @gate: boolean set to true to gate, false to ungate
1640 *
1641 * Gate/ungate the automatic PHY configuration via hardware; perform
1642 * the configuration via software instead.
1643 **/
1644static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1645{
1646 u32 extcnf_ctrl;
1647
1648 if (hw->mac.type != e1000_pch2lan)
1649 return;
1650
1651 extcnf_ctrl = er32(EXTCNF_CTRL);
1652
1653 if (gate)
1654 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1655 else
1656 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1657
1658 ew32(EXTCNF_CTRL, extcnf_ctrl);
1659 return;
1660}
1661
1662/**
1562 * e1000_lan_init_done_ich8lan - Check for PHY config completion 1663 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1563 * @hw: pointer to the HW structure 1664 * @hw: pointer to the HW structure
1564 * 1665 *
@@ -1602,6 +1703,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1602 if (e1000_check_reset_block(hw)) 1703 if (e1000_check_reset_block(hw))
1603 goto out; 1704 goto out;
1604 1705
1706 /* Allow time for h/w to get to quiescent state after reset */
1707 msleep(10);
1708
1605 /* Perform any necessary post-reset workarounds */ 1709 /* Perform any necessary post-reset workarounds */
1606 switch (hw->mac.type) { 1710 switch (hw->mac.type) {
1607 case e1000_pchlan: 1711 case e1000_pchlan:
@@ -1630,6 +1734,13 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1630 /* Configure the LCD with the OEM bits in NVM */ 1734 /* Configure the LCD with the OEM bits in NVM */
1631 ret_val = e1000_oem_bits_config_ich8lan(hw, true); 1735 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1632 1736
1737 /* Ungate automatic PHY configuration on non-managed 82579 */
1738 if ((hw->mac.type == e1000_pch2lan) &&
1739 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1740 msleep(10);
1741 e1000_gate_hw_phy_config_ich8lan(hw, false);
1742 }
1743
1633out: 1744out:
1634 return ret_val; 1745 return ret_val;
1635} 1746}
@@ -1646,6 +1757,11 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1646{ 1757{
1647 s32 ret_val = 0; 1758 s32 ret_val = 0;
1648 1759
1760 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
1761 if ((hw->mac.type == e1000_pch2lan) &&
1762 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1763 e1000_gate_hw_phy_config_ich8lan(hw, true);
1764
1649 ret_val = e1000e_phy_hw_reset_generic(hw); 1765 ret_val = e1000e_phy_hw_reset_generic(hw);
1650 if (ret_val) 1766 if (ret_val)
1651 goto out; 1767 goto out;
@@ -2910,6 +3026,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2910 * external PHY is reset. 3026 * external PHY is reset.
2911 */ 3027 */
2912 ctrl |= E1000_CTRL_PHY_RST; 3028 ctrl |= E1000_CTRL_PHY_RST;
3029
3030 /*
3031 * Gate automatic PHY configuration by hardware on
3032 * non-managed 82579
3033 */
3034 if ((hw->mac.type == e1000_pch2lan) &&
3035 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
3036 e1000_gate_hw_phy_config_ich8lan(hw, true);
2913 } 3037 }
2914 ret_val = e1000_acquire_swflag_ich8lan(hw); 3038 ret_val = e1000_acquire_swflag_ich8lan(hw);
2915 e_dbg("Issuing a global reset to ich8lan\n"); 3039 e_dbg("Issuing a global reset to ich8lan\n");
@@ -3460,13 +3584,20 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3460void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) 3584void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3461{ 3585{
3462 u32 phy_ctrl; 3586 u32 phy_ctrl;
3587 s32 ret_val;
3463 3588
3464 phy_ctrl = er32(PHY_CTRL); 3589 phy_ctrl = er32(PHY_CTRL);
3465 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; 3590 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3466 ew32(PHY_CTRL, phy_ctrl); 3591 ew32(PHY_CTRL, phy_ctrl);
3467 3592
3468 if (hw->mac.type >= e1000_pchlan) 3593 if (hw->mac.type >= e1000_pchlan) {
3469 e1000_phy_hw_reset_ich8lan(hw); 3594 e1000_oem_bits_config_ich8lan(hw, true);
3595 ret_val = hw->phy.ops.acquire(hw);
3596 if (ret_val)
3597 return;
3598 e1000_write_smbus_addr(hw);
3599 hw->phy.ops.release(hw);
3600 }
3470} 3601}
3471 3602
3472/** 3603/**
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index df4a27922931..0fd4eb5ac5fb 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -183,6 +183,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
183 u16 offset, nvm_alt_mac_addr_offset, nvm_data; 183 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
184 u8 alt_mac_addr[ETH_ALEN]; 184 u8 alt_mac_addr[ETH_ALEN];
185 185
186 ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
187 if (ret_val)
188 goto out;
189
190 /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
191 if (!((nvm_data & NVM_COMPAT_LOM) ||
192 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
193 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
194 goto out;
195
186 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, 196 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
187 &nvm_alt_mac_addr_offset); 197 &nvm_alt_mac_addr_offset);
188 if (ret_val) { 198 if (ret_val) {
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 2b8ef44bd2b1..e561d15c3eb1 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2704,6 +2704,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2704 u32 psrctl = 0; 2704 u32 psrctl = 0;
2705 u32 pages = 0; 2705 u32 pages = 0;
2706 2706
2707 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2708 if (hw->mac.type == e1000_pch2lan) {
2709 s32 ret_val;
2710
2711 if (adapter->netdev->mtu > ETH_DATA_LEN)
2712 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2713 else
2714 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2715 }
2716
2707 /* Program MC offset vector base */ 2717 /* Program MC offset vector base */
2708 rctl = er32(RCTL); 2718 rctl = er32(RCTL);
2709 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2719 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
@@ -2744,16 +2754,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2744 e1e_wphy(hw, 22, phy_data); 2754 e1e_wphy(hw, 22, phy_data);
2745 } 2755 }
2746 2756
2747 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2748 if (hw->mac.type == e1000_pch2lan) {
2749 s32 ret_val;
2750
2751 if (rctl & E1000_RCTL_LPE)
2752 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2753 else
2754 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2755 }
2756
2757 /* Setup buffer sizes */ 2757 /* Setup buffer sizes */
2758 rctl &= ~E1000_RCTL_SZ_4096; 2758 rctl &= ~E1000_RCTL_SZ_4096;
2759 rctl |= E1000_RCTL_BSEX; 2759 rctl |= E1000_RCTL_BSEX;
@@ -4833,6 +4833,15 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4833 return -EINVAL; 4833 return -EINVAL;
4834 } 4834 }
4835 4835
4836 /* Jumbo frame workaround on 82579 requires CRC be stripped */
4837 if ((adapter->hw.mac.type == e1000_pch2lan) &&
4838 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
4839 (new_mtu > ETH_DATA_LEN)) {
4840 e_err("Jumbo Frames not supported on 82579 when CRC "
4841 "stripping is disabled.\n");
4842 return -EINVAL;
4843 }
4844
4836 /* 82573 Errata 17 */ 4845 /* 82573 Errata 17 */
4837 if (((adapter->hw.mac.type == e1000_82573) || 4846 if (((adapter->hw.mac.type == e1000_82573) ||
4838 (adapter->hw.mac.type == e1000_82574)) && 4847 (adapter->hw.mac.type == e1000_82574)) &&
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 99a929964e3c..1846623c6ae6 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0105" 43#define DRV_VERSION "EHEA_0106"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
@@ -400,6 +400,7 @@ struct ehea_port_res {
400 u32 poll_counter; 400 u32 poll_counter;
401 struct net_lro_mgr lro_mgr; 401 struct net_lro_mgr lro_mgr;
402 struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; 402 struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
403 int sq_restart_flag;
403}; 404};
404 405
405 406
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 897719b49f96..6372610ed240 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -533,8 +533,15 @@ static inline void ehea_fill_skb(struct net_device *dev,
533 int length = cqe->num_bytes_transfered - 4; /*remove CRC */ 533 int length = cqe->num_bytes_transfered - 4; /*remove CRC */
534 534
535 skb_put(skb, length); 535 skb_put(skb, length);
536 skb->ip_summed = CHECKSUM_UNNECESSARY;
537 skb->protocol = eth_type_trans(skb, dev); 536 skb->protocol = eth_type_trans(skb, dev);
537
538 /* The packet was not an IPV4 packet so a complemented checksum was
539 calculated. The value is found in the Internet Checksum field. */
540 if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
541 skb->ip_summed = CHECKSUM_COMPLETE;
542 skb->csum = csum_unfold(~cqe->inet_checksum_value);
543 } else
544 skb->ip_summed = CHECKSUM_UNNECESSARY;
538} 545}
539 546
540static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array, 547static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
@@ -776,6 +783,53 @@ static int ehea_proc_rwqes(struct net_device *dev,
776 return processed; 783 return processed;
777} 784}
778 785
786#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
787
788static void reset_sq_restart_flag(struct ehea_port *port)
789{
790 int i;
791
792 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
793 struct ehea_port_res *pr = &port->port_res[i];
794 pr->sq_restart_flag = 0;
795 }
796}
797
798static void check_sqs(struct ehea_port *port)
799{
800 struct ehea_swqe *swqe;
801 int swqe_index;
802 int i, k;
803
804 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
805 struct ehea_port_res *pr = &port->port_res[i];
806 k = 0;
807 swqe = ehea_get_swqe(pr->qp, &swqe_index);
808 memset(swqe, 0, SWQE_HEADER_SIZE);
809 atomic_dec(&pr->swqe_avail);
810
811 swqe->tx_control |= EHEA_SWQE_PURGE;
812 swqe->wr_id = SWQE_RESTART_CHECK;
813 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
814 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
815 swqe->immediate_data_length = 80;
816
817 ehea_post_swqe(pr->qp, swqe);
818
819 while (pr->sq_restart_flag == 0) {
820 msleep(5);
821 if (++k == 100) {
822 ehea_error("HW/SW queues out of sync");
823 ehea_schedule_port_reset(pr->port);
824 return;
825 }
826 }
827 }
828
829 return;
830}
831
832
779static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) 833static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
780{ 834{
781 struct sk_buff *skb; 835 struct sk_buff *skb;
@@ -793,6 +847,13 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
793 847
794 cqe_counter++; 848 cqe_counter++;
795 rmb(); 849 rmb();
850
851 if (cqe->wr_id == SWQE_RESTART_CHECK) {
852 pr->sq_restart_flag = 1;
853 swqe_av++;
854 break;
855 }
856
796 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { 857 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
797 ehea_error("Bad send completion status=0x%04X", 858 ehea_error("Bad send completion status=0x%04X",
798 cqe->status); 859 cqe->status);
@@ -2675,8 +2736,10 @@ static void ehea_flush_sq(struct ehea_port *port)
2675 int k = 0; 2736 int k = 0;
2676 while (atomic_read(&pr->swqe_avail) < swqe_max) { 2737 while (atomic_read(&pr->swqe_avail) < swqe_max) {
2677 msleep(5); 2738 msleep(5);
2678 if (++k == 20) 2739 if (++k == 20) {
2740 ehea_error("WARNING: sq not flushed completely");
2679 break; 2741 break;
2742 }
2680 } 2743 }
2681 } 2744 }
2682} 2745}
@@ -2917,6 +2980,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
2917 port_napi_disable(port); 2980 port_napi_disable(port);
2918 mutex_unlock(&port->port_lock); 2981 mutex_unlock(&port->port_lock);
2919 } 2982 }
2983 reset_sq_restart_flag(port);
2920 } 2984 }
2921 2985
2922 /* Unregister old memory region */ 2986 /* Unregister old memory region */
@@ -2951,6 +3015,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
2951 mutex_lock(&port->port_lock); 3015 mutex_lock(&port->port_lock);
2952 port_napi_enable(port); 3016 port_napi_enable(port);
2953 ret = ehea_restart_qps(dev); 3017 ret = ehea_restart_qps(dev);
3018 check_sqs(port);
2954 if (!ret) 3019 if (!ret)
2955 netif_wake_queue(dev); 3020 netif_wake_queue(dev);
2956 mutex_unlock(&port->port_lock); 3021 mutex_unlock(&port->port_lock);
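
/*
 * Sketch, not part of the patch: the general pattern behind the new
 * EHEA_CQE_BLIND_CKSUM handling in ehea_fill_skb() above.  When the hardware
 * could not fully validate the packet but did return a ones-complement sum of
 * it, the driver reports CHECKSUM_COMPLETE and hands the complemented sum to
 * the stack via csum_unfold(); only fully validated packets are marked
 * CHECKSUM_UNNECESSARY.  Names below are illustrative.
 */
static void example_set_rx_checksum(struct sk_buff *skb, bool blind_csum,
				    __sum16 hw_inet_csum)
{
	if (blind_csum) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~hw_inet_csum);
	} else {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
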
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index f608a6c54af5..38104734a3be 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -150,6 +150,7 @@ struct ehea_rwqe {
150#define EHEA_CQE_TYPE_RQ 0x60 150#define EHEA_CQE_TYPE_RQ 0x60
151#define EHEA_CQE_STAT_ERR_MASK 0x700F 151#define EHEA_CQE_STAT_ERR_MASK 0x700F
152#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF 152#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF
153#define EHEA_CQE_BLIND_CKSUM 0x8000
153#define EHEA_CQE_STAT_ERR_TCP 0x4000 154#define EHEA_CQE_STAT_ERR_TCP 0x4000
154#define EHEA_CQE_STAT_ERR_IP 0x2000 155#define EHEA_CQE_STAT_ERR_IP 0x2000
155#define EHEA_CQE_STAT_ERR_CRC 0x1000 156#define EHEA_CQE_STAT_ERR_CRC 0x1000
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index dda2c7944da9..0cb1cf9cf4b0 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -555,6 +555,8 @@ static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
555 equalizer_t *eql; 555 equalizer_t *eql;
556 master_config_t mc; 556 master_config_t mc;
557 557
558 memset(&mc, 0, sizeof(master_config_t));
559
558 if (eql_is_master(dev)) { 560 if (eql_is_master(dev)) {
559 eql = netdev_priv(dev); 561 eql = netdev_priv(dev);
560 mc.max_slaves = eql->max_slaves; 562 mc.max_slaves = eql->max_slaves;
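
/*
 * Sketch, not part of the patch: why the added memset() in eql_g_master_cfg()
 * matters.  A structure that is later copied to user space must have every
 * byte defined, including compiler-inserted padding and any fields the
 * function never sets, otherwise stale kernel stack data leaks out.  The
 * struct and function names below are illustrative only.
 */
struct example_cfg {
	u32 value;
	u16 flags;		/* followed by 2 bytes of padding */
};

static int example_fill_cfg(struct example_cfg __user *ucfg)
{
	struct example_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));	/* zeroes padding and unset fields */
	cfg.value = 1;

	return copy_to_user(ucfg, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
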
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 768b840aeb6b..cce32d43175f 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -678,24 +678,37 @@ static int fec_enet_mii_probe(struct net_device *dev)
678{ 678{
679 struct fec_enet_private *fep = netdev_priv(dev); 679 struct fec_enet_private *fep = netdev_priv(dev);
680 struct phy_device *phy_dev = NULL; 680 struct phy_device *phy_dev = NULL;
681 int ret; 681 char mdio_bus_id[MII_BUS_ID_SIZE];
682 char phy_name[MII_BUS_ID_SIZE + 3];
683 int phy_id;
682 684
683 fep->phy_dev = NULL; 685 fep->phy_dev = NULL;
684 686
685 /* find the first phy */ 687 /* check for attached phy */
686 phy_dev = phy_find_first(fep->mii_bus); 688 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
687 if (!phy_dev) { 689 if ((fep->mii_bus->phy_mask & (1 << phy_id)))
688 printk(KERN_ERR "%s: no PHY found\n", dev->name); 690 continue;
689 return -ENODEV; 691 if (fep->mii_bus->phy_map[phy_id] == NULL)
692 continue;
693 if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
694 continue;
695 strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
696 break;
690 } 697 }
691 698
692 /* attach the mac to the phy */ 699 if (phy_id >= PHY_MAX_ADDR) {
693 ret = phy_connect_direct(dev, phy_dev, 700 printk(KERN_INFO "%s: no PHY, assuming direct connection "
694 &fec_enet_adjust_link, 0, 701 "to switch\n", dev->name);
695 PHY_INTERFACE_MODE_MII); 702 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
696 if (ret) { 703 phy_id = 0;
697 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 704 }
698 return ret; 705
706 snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
707 phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
708 PHY_INTERFACE_MODE_MII);
709 if (IS_ERR(phy_dev)) {
710 printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
711 return PTR_ERR(phy_dev);
699 } 712 }
700 713
701 /* mask with MAC supported features */ 714 /* mask with MAC supported features */
@@ -738,7 +751,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
738 fep->mii_bus->read = fec_enet_mdio_read; 751 fep->mii_bus->read = fec_enet_mdio_read;
739 fep->mii_bus->write = fec_enet_mdio_write; 752 fep->mii_bus->write = fec_enet_mdio_write;
740 fep->mii_bus->reset = fec_enet_mdio_reset; 753 fep->mii_bus->reset = fec_enet_mdio_reset;
741 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); 754 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
742 fep->mii_bus->priv = fep; 755 fep->mii_bus->priv = fep;
743 fep->mii_bus->parent = &pdev->dev; 756 fep->mii_bus->parent = &pdev->dev;
744 757
@@ -1311,6 +1324,9 @@ fec_probe(struct platform_device *pdev)
1311 if (ret) 1324 if (ret)
1312 goto failed_mii_init; 1325 goto failed_mii_init;
1313 1326
1327 /* Carrier starts down, phylib will bring it up */
1328 netif_carrier_off(ndev);
1329
1314 ret = register_netdev(ndev); 1330 ret = register_netdev(ndev);
1315 if (ret) 1331 if (ret)
1316 goto failed_register; 1332 goto failed_register;
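
/*
 * Sketch, not part of the patch: the bus-walk-and-connect pattern the fec
 * hunk switches to.  Masked addresses and empty slots are skipped, and the
 * PHY is attached by the "bus_id:address" name that PHY_ID_FMT produces.
 * The adjust_link callback is whatever the MAC driver normally passes.
 */
static struct phy_device *example_connect_first_phy(struct net_device *ndev,
						    struct mii_bus *bus,
						    void (*adjust_link)(struct net_device *))
{
	char phy_name[MII_BUS_ID_SIZE + 3];
	int addr;

	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		if (bus->phy_mask & (1 << addr))
			continue;			/* address not probed */
		if (!bus->phy_map[addr])
			continue;			/* nothing found here */
		snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT,
			 bus->id, addr);
		return phy_connect(ndev, phy_name, adjust_link, 0,
				   PHY_INTERFACE_MODE_MII);
	}

	return ERR_PTR(-ENODEV);
}
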
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 3506fd6ad726..519e19e23955 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2928,7 +2928,7 @@ static int __devinit emac_probe(struct platform_device *ofdev,
2928 if (dev->emac_irq != NO_IRQ) 2928 if (dev->emac_irq != NO_IRQ)
2929 irq_dispose_mapping(dev->emac_irq); 2929 irq_dispose_mapping(dev->emac_irq);
2930 err_free: 2930 err_free:
2931 kfree(ndev); 2931 free_netdev(ndev);
2932 err_gone: 2932 err_gone:
2933 /* if we were on the bootlist, remove us as we won't show up and 2933 /* if we were on the bootlist, remove us as we won't show up and
2934 * wake up all waiters to notify them in case they were waiting 2934 * wake up all waiters to notify them in case they were waiting
@@ -2971,7 +2971,7 @@ static int __devexit emac_remove(struct platform_device *ofdev)
2971 if (dev->emac_irq != NO_IRQ) 2971 if (dev->emac_irq != NO_IRQ)
2972 irq_dispose_mapping(dev->emac_irq); 2972 irq_dispose_mapping(dev->emac_irq);
2973 2973
2974 kfree(dev->ndev); 2974 free_netdev(dev->ndev);
2975 2975
2976 return 0; 2976 return 0;
2977} 2977}
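
/*
 * Sketch, not part of the patch: the rule the two one-line fixes above
 * enforce.  A net_device obtained from alloc_etherdev() owns more than the
 * allocation itself, so error and removal paths must release it with
 * free_netdev() rather than plain kfree().
 */
static struct net_device *example_probe(void)
{
	struct net_device *ndev = alloc_etherdev(sizeof(struct emac_instance));

	if (!ndev)
		return NULL;

	if (register_netdev(ndev)) {
		free_netdev(ndev);	/* not kfree(ndev) */
		return NULL;
	}

	return ndev;
}
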
diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c
index 3995fafc1e08..8c6c1e2a8750 100644
--- a/drivers/net/ibm_newemac/debug.c
+++ b/drivers/net/ibm_newemac/debug.c
@@ -238,7 +238,7 @@ void emac_dbg_dump_all(void)
238} 238}
239 239
240#if defined(CONFIG_MAGIC_SYSRQ) 240#if defined(CONFIG_MAGIC_SYSRQ)
241static void emac_sysrq_handler(int key, struct tty_struct *tty) 241static void emac_sysrq_handler(int key)
242{ 242{
243 emac_dbg_dump_all(); 243 emac_dbg_dump_all();
244} 244}
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 2602852cc55a..4734c939ad03 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1113,7 +1113,8 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1113 struct ibmveth_adapter *adapter = netdev_priv(dev); 1113 struct ibmveth_adapter *adapter = netdev_priv(dev);
1114 struct vio_dev *viodev = adapter->vdev; 1114 struct vio_dev *viodev = adapter->vdev;
1115 int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; 1115 int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
1116 int i; 1116 int i, rc;
1117 int need_restart = 0;
1117 1118
1118 if (new_mtu < IBMVETH_MAX_MTU) 1119 if (new_mtu < IBMVETH_MAX_MTU)
1119 return -EINVAL; 1120 return -EINVAL;
@@ -1127,35 +1128,32 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1127 1128
1128 /* Deactivate all the buffer pools so that the next loop can activate 1129 /* Deactivate all the buffer pools so that the next loop can activate
1129 only the buffer pools necessary to hold the new MTU */ 1130 only the buffer pools necessary to hold the new MTU */
1130 for (i = 0; i < IbmVethNumBufferPools; i++) 1131 if (netif_running(adapter->netdev)) {
1131 if (adapter->rx_buff_pool[i].active) { 1132 need_restart = 1;
1132 ibmveth_free_buffer_pool(adapter, 1133 adapter->pool_config = 1;
1133 &adapter->rx_buff_pool[i]); 1134 ibmveth_close(adapter->netdev);
1134 adapter->rx_buff_pool[i].active = 0; 1135 adapter->pool_config = 0;
1135 } 1136 }
1136 1137
1137 /* Look for an active buffer pool that can hold the new MTU */ 1138 /* Look for an active buffer pool that can hold the new MTU */
1138 for(i = 0; i<IbmVethNumBufferPools; i++) { 1139 for(i = 0; i<IbmVethNumBufferPools; i++) {
1139 adapter->rx_buff_pool[i].active = 1; 1140 adapter->rx_buff_pool[i].active = 1;
1140 1141
1141 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { 1142 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
1142 if (netif_running(adapter->netdev)) {
1143 adapter->pool_config = 1;
1144 ibmveth_close(adapter->netdev);
1145 adapter->pool_config = 0;
1146 dev->mtu = new_mtu;
1147 vio_cmo_set_dev_desired(viodev,
1148 ibmveth_get_desired_dma
1149 (viodev));
1150 return ibmveth_open(adapter->netdev);
1151 }
1152 dev->mtu = new_mtu; 1143 dev->mtu = new_mtu;
1153 vio_cmo_set_dev_desired(viodev, 1144 vio_cmo_set_dev_desired(viodev,
1154 ibmveth_get_desired_dma 1145 ibmveth_get_desired_dma
1155 (viodev)); 1146 (viodev));
1147 if (need_restart) {
1148 return ibmveth_open(adapter->netdev);
1149 }
1156 return 0; 1150 return 0;
1157 } 1151 }
1158 } 1152 }
1153
1154 if (need_restart && (rc = ibmveth_open(adapter->netdev)))
1155 return rc;
1156
1159 return -EINVAL; 1157 return -EINVAL;
1160} 1158}
1161 1159
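
/*
 * Sketch, not part of the patch: the restructured MTU-change flow the ibmveth
 * hunk moves to.  The interface is closed once up front if it was running,
 * the buffer pools are reconfigured, and the device is reopened exactly once
 * on every exit path.  example_close()/example_open() stand in for the
 * driver's own helpers.
 */
static void example_close(struct net_device *dev) { /* driver's close path */ }
static int example_open(struct net_device *dev) { return 0; /* driver's open path */ }

static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	int need_restart = netif_running(dev);
	int rc;

	if (need_restart)
		example_close(dev);

	dev->mtu = new_mtu;	/* reconfigure buffer pools for the new size */

	if (need_restart && (rc = example_open(dev)))
		return rc;

	return 0;
}
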
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index b4fb07a6f13f..51919fcd50c2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -503,30 +503,33 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
503 ks8851_wrreg16(ks, KS_RXQCR, 503 ks8851_wrreg16(ks, KS_RXQCR,
504 ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); 504 ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
505 505
506 if (rxlen > 0) { 506 if (rxlen > 4) {
507 skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8); 507 unsigned int rxalign;
508 if (!skb) { 508
509 /* todo - dump frame and move on */ 509 rxlen -= 4;
510 } 510 rxalign = ALIGN(rxlen, 4);
511 skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
512 if (skb) {
511 513
512 /* two bytes to ensure ip is aligned, and four bytes 514 /* 4 bytes of status header + 4 bytes of
513 * for the status header and 4 bytes of garbage */ 515 * garbage: we put them before ethernet
514 skb_reserve(skb, 2 + 4 + 4); 516 * header, so that they are copied,
517 * but ignored.
518 */
515 519
516 rxpkt = skb_put(skb, rxlen - 4) - 8; 520 rxpkt = skb_put(skb, rxlen) - 8;
517 521
518 /* align the packet length to 4 bytes, and add 4 bytes 522 ks8851_rdfifo(ks, rxpkt, rxalign + 8);
519 * as we're getting the rx status header as well */
520 ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
521 523
522 if (netif_msg_pktdata(ks)) 524 if (netif_msg_pktdata(ks))
523 ks8851_dbg_dumpkkt(ks, rxpkt); 525 ks8851_dbg_dumpkkt(ks, rxpkt);
524 526
525 skb->protocol = eth_type_trans(skb, ks->netdev); 527 skb->protocol = eth_type_trans(skb, ks->netdev);
526 netif_rx(skb); 528 netif_rx(skb);
527 529
528 ks->netdev->stats.rx_packets++; 530 ks->netdev->stats.rx_packets++;
529 ks->netdev->stats.rx_bytes += rxlen - 4; 531 ks->netdev->stats.rx_bytes += rxlen;
532 }
530 } 533 }
531 534
532 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); 535 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
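
/*
 * Sketch, not part of the patch: the RX path shape ks8851_rx_pkts() moves to.
 * The FIFO burst must be a multiple of 4 bytes and is preceded by 8 bytes of
 * status/garbage, so those bytes are read into the headroom just before the
 * packet data, where they are copied but ignored.  example_read_fifo() stands
 * in for the chip-specific FIFO read.
 */
static void example_read_fifo(struct net_device *ndev, u8 *buf, unsigned int len)
{
	/* chip-specific burst read from the RX FIFO would go here */
}

static void example_rx_one_frame(struct net_device *ndev, unsigned int rxlen)
{
	unsigned int rxalign;
	struct sk_buff *skb;
	u8 *rxpkt;

	if (rxlen <= 4)			/* nothing left once the CRC is gone */
		return;

	rxlen -= 4;			/* drop the trailing CRC */
	rxalign = ALIGN(rxlen, 4);
	skb = netdev_alloc_skb_ip_align(ndev, rxalign);
	if (!skb)
		return;

	rxpkt = skb_put(skb, rxlen) - 8;	/* 8 header bytes land here */
	example_read_fifo(ndev, rxpkt, rxalign + 8);

	skb->protocol = eth_type_trans(skb, ndev);
	netif_rx(skb);

	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += rxlen;
}
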
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index c7b624711f5e..87f0a93b165c 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -38,6 +38,7 @@
38#include <linux/of_device.h> 38#include <linux/of_device.h>
39#include <linux/of_mdio.h> 39#include <linux/of_mdio.h>
40#include <linux/of_platform.h> 40#include <linux/of_platform.h>
41#include <linux/of_address.h>
41#include <linux/skbuff.h> 42#include <linux/skbuff.h>
42#include <linux/spinlock.h> 43#include <linux/spinlock.h>
43#include <linux/tcp.h> /* needed for sizeof(tcphdr) */ 44#include <linux/tcp.h> /* needed for sizeof(tcphdr) */
@@ -902,8 +903,8 @@ temac_poll_controller(struct net_device *ndev)
902 disable_irq(lp->tx_irq); 903 disable_irq(lp->tx_irq);
903 disable_irq(lp->rx_irq); 904 disable_irq(lp->rx_irq);
904 905
905 ll_temac_rx_irq(lp->tx_irq, lp); 906 ll_temac_rx_irq(lp->tx_irq, ndev);
906 ll_temac_tx_irq(lp->rx_irq, lp); 907 ll_temac_tx_irq(lp->rx_irq, ndev);
907 908
908 enable_irq(lp->tx_irq); 909 enable_irq(lp->tx_irq);
909 enable_irq(lp->rx_irq); 910 enable_irq(lp->rx_irq);
diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ll_temac_mdio.c
index 5ae28c975b38..8cf9d4f56bb2 100644
--- a/drivers/net/ll_temac_mdio.c
+++ b/drivers/net/ll_temac_mdio.c
@@ -10,6 +10,7 @@
10#include <linux/phy.h> 10#include <linux/phy.h>
11#include <linux/of.h> 11#include <linux/of.h>
12#include <linux/of_device.h> 12#include <linux/of_device.h>
13#include <linux/of_address.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14#include <linux/of_mdio.h> 15#include <linux/of_mdio.h>
15 16
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index ffa1b9ce1cc5..6dca3574e355 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 73 56#define _NETXEN_NIC_LINUX_SUBVERSION 74
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.73" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.74"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index c865dda2adf1..b075a35b85d4 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1540,7 +1540,6 @@ netxen_process_rcv(struct netxen_adapter *adapter,
1540 if (pkt_offset) 1540 if (pkt_offset)
1541 skb_pull(skb, pkt_offset); 1541 skb_pull(skb, pkt_offset);
1542 1542
1543 skb->truesize = skb->len + sizeof(struct sk_buff);
1544 skb->protocol = eth_type_trans(skb, netdev); 1543 skb->protocol = eth_type_trans(skb, netdev);
1545 1544
1546 napi_gro_receive(&sds_ring->napi, skb); 1545 napi_gro_receive(&sds_ring->napi, skb);
@@ -1602,8 +1601,6 @@ netxen_process_lro(struct netxen_adapter *adapter,
1602 1601
1603 skb_put(skb, lro_length + data_offset); 1602 skb_put(skb, lro_length + data_offset);
1604 1603
1605 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1606
1607 skb_pull(skb, l2_hdr_offset); 1604 skb_pull(skb, l2_hdr_offset);
1608 skb->protocol = eth_type_trans(skb, netdev); 1605 skb->protocol = eth_type_trans(skb, netdev);
1609 1606
@@ -1805,8 +1802,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
1805 netxen_ctx_msg msg = 0; 1802 netxen_ctx_msg msg = 0;
1806 struct list_head *head; 1803 struct list_head *head;
1807 1804
1808 spin_lock(&rds_ring->lock);
1809
1810 producer = rds_ring->producer; 1805 producer = rds_ring->producer;
1811 1806
1812 head = &rds_ring->free_list; 1807 head = &rds_ring->free_list;
@@ -1853,8 +1848,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
1853 NETXEN_RCV_PRODUCER_OFFSET), msg); 1848 NETXEN_RCV_PRODUCER_OFFSET), msg);
1854 } 1849 }
1855 } 1850 }
1856
1857 spin_unlock(&rds_ring->lock);
1858} 1851}
1859 1852
1860static void 1853static void
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index fd86e18604e6..73d314592230 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -2032,8 +2032,6 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
2032 struct netxen_adapter *adapter = netdev_priv(netdev); 2032 struct netxen_adapter *adapter = netdev_priv(netdev);
2033 struct net_device_stats *stats = &netdev->stats; 2033 struct net_device_stats *stats = &netdev->stats;
2034 2034
2035 memset(stats, 0, sizeof(*stats));
2036
2037 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; 2035 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2038 stats->tx_packets = adapter->stats.xmitfinished; 2036 stats->tx_packets = adapter->stats.xmitfinished;
2039 stats->rx_bytes = adapter->stats.rxbytes; 2037 stats->rx_bytes = adapter->stats.rxbytes;
@@ -2133,9 +2131,16 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
2133#ifdef CONFIG_NET_POLL_CONTROLLER 2131#ifdef CONFIG_NET_POLL_CONTROLLER
2134static void netxen_nic_poll_controller(struct net_device *netdev) 2132static void netxen_nic_poll_controller(struct net_device *netdev)
2135{ 2133{
2134 int ring;
2135 struct nx_host_sds_ring *sds_ring;
2136 struct netxen_adapter *adapter = netdev_priv(netdev); 2136 struct netxen_adapter *adapter = netdev_priv(netdev);
2137 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
2138
2137 disable_irq(adapter->irq); 2139 disable_irq(adapter->irq);
2138 netxen_intr(adapter->irq, adapter); 2140 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2141 sds_ring = &recv_ctx->sds_rings[ring];
2142 netxen_intr(adapter->irq, sds_ring);
2143 }
2139 enable_irq(adapter->irq); 2144 enable_irq(adapter->irq);
2140} 2145}
2141#endif 2146#endif
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index bc695d53cdcc..fe6983af6918 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -7269,32 +7269,28 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
7269 struct niu_parent *parent = np->parent; 7269 struct niu_parent *parent = np->parent;
7270 struct niu_tcam_entry *tp; 7270 struct niu_tcam_entry *tp;
7271 int i, idx, cnt; 7271 int i, idx, cnt;
7272 u16 n_entries;
7273 unsigned long flags; 7272 unsigned long flags;
7274 7273 int ret = 0;
7275 7274
7276 /* put the tcam size here */ 7275 /* put the tcam size here */
7277 nfc->data = tcam_get_size(np); 7276 nfc->data = tcam_get_size(np);
7278 7277
7279 niu_lock_parent(np, flags); 7278 niu_lock_parent(np, flags);
7280 n_entries = nfc->rule_cnt;
7281 for (cnt = 0, i = 0; i < nfc->data; i++) { 7279 for (cnt = 0, i = 0; i < nfc->data; i++) {
7282 idx = tcam_get_index(np, i); 7280 idx = tcam_get_index(np, i);
7283 tp = &parent->tcam[idx]; 7281 tp = &parent->tcam[idx];
7284 if (!tp->valid) 7282 if (!tp->valid)
7285 continue; 7283 continue;
7284 if (cnt == nfc->rule_cnt) {
7285 ret = -EMSGSIZE;
7286 break;
7287 }
7286 rule_locs[cnt] = i; 7288 rule_locs[cnt] = i;
7287 cnt++; 7289 cnt++;
7288 } 7290 }
7289 niu_unlock_parent(np, flags); 7291 niu_unlock_parent(np, flags);
7290 7292
7291 if (n_entries != cnt) { 7293 return ret;
7292 /* print warning, this should not happen */
7293 netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
7294 np->parent->index, __func__, n_entries, cnt);
7295 }
7296
7297 return 0;
7298} 7294}
7299 7295
7300static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 7296static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
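
/*
 * Sketch, not part of the patch: the bounded-fill pattern the niu hunk adopts.
 * Instead of trusting a caller-supplied expected count, the loop stops writing
 * the moment the user buffer is full and reports -EMSGSIZE, so a stale
 * rule_cnt can no longer cause an overrun of rule_locs[].
 */
static int example_collect_rules(const u8 *valid, int table_size,
				 u32 *rule_locs, u32 rule_cnt)
{
	u32 cnt = 0;
	int i;

	for (i = 0; i < table_size; i++) {
		if (!valid[i])
			continue;
		if (cnt == rule_cnt)
			return -EMSGSIZE;	/* caller's buffer is full */
		rule_locs[cnt++] = i;
	}

	return 0;
}
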
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index c3edfe4c2651..f9b509a6b09a 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -508,7 +508,8 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
508 unsigned int vcc, 508 unsigned int vcc,
509 void *priv_data) 509 void *priv_data)
510{ 510{
511 int *has_shmem = priv_data; 511 int *priv = priv_data;
512 int try = (*priv & 0x1);
512 int i; 513 int i;
513 cistpl_io_t *io = &cfg->io; 514 cistpl_io_t *io = &cfg->io;
514 515
@@ -525,77 +526,103 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
525 i = p_dev->resource[1]->end = 0; 526 i = p_dev->resource[1]->end = 0;
526 } 527 }
527 528
528 *has_shmem = ((cfg->mem.nwin == 1) && 529 *priv &= ((cfg->mem.nwin == 1) &&
529 (cfg->mem.win[0].len >= 0x4000)); 530 (cfg->mem.win[0].len >= 0x4000)) ? 0x10 : ~0x10;
531
530 p_dev->resource[0]->start = io->win[i].base; 532 p_dev->resource[0]->start = io->win[i].base;
531 p_dev->resource[0]->end = io->win[i].len; 533 p_dev->resource[0]->end = io->win[i].len;
532 p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; 534 if (!try)
535 p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
536 else
537 p_dev->io_lines = 16;
533 if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32) 538 if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32)
534 return try_io_port(p_dev); 539 return try_io_port(p_dev);
535 540
536 return 0; 541 return -EINVAL;
542}
543
544static hw_info_t *pcnet_try_config(struct pcmcia_device *link,
545 int *has_shmem, int try)
546{
547 struct net_device *dev = link->priv;
548 hw_info_t *local_hw_info;
549 pcnet_dev_t *info = PRIV(dev);
550 int priv = try;
551 int ret;
552
553 ret = pcmcia_loop_config(link, pcnet_confcheck, &priv);
554 if (ret) {
555 dev_warn(&link->dev, "no useable port range found\n");
556 return NULL;
557 }
558 *has_shmem = (priv & 0x10);
559
560 if (!link->irq)
561 return NULL;
562
563 if (resource_size(link->resource[1]) == 8) {
564 link->conf.Attributes |= CONF_ENABLE_SPKR;
565 link->conf.Status = CCSR_AUDIO_ENA;
566 }
567 if ((link->manf_id == MANFID_IBM) &&
568 (link->card_id == PRODID_IBM_HOME_AND_AWAY))
569 link->conf.ConfigIndex |= 0x10;
570
571 ret = pcmcia_request_configuration(link, &link->conf);
572 if (ret)
573 return NULL;
574
575 dev->irq = link->irq;
576 dev->base_addr = link->resource[0]->start;
577
578 if (info->flags & HAS_MISC_REG) {
579 if ((if_port == 1) || (if_port == 2))
580 dev->if_port = if_port;
581 else
582 dev_notice(&link->dev, "invalid if_port requested\n");
583 } else
584 dev->if_port = 0;
585
586 if ((link->conf.ConfigBase == 0x03c0) &&
587 (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
588 dev_info(&link->dev,
589 "this is an AX88190 card - use axnet_cs instead.\n");
590 return NULL;
591 }
592
593 local_hw_info = get_hwinfo(link);
594 if (!local_hw_info)
595 local_hw_info = get_prom(link);
596 if (!local_hw_info)
597 local_hw_info = get_dl10019(link);
598 if (!local_hw_info)
599 local_hw_info = get_ax88190(link);
600 if (!local_hw_info)
601 local_hw_info = get_hwired(link);
602
603 return local_hw_info;
537} 604}
538 605
539static int pcnet_config(struct pcmcia_device *link) 606static int pcnet_config(struct pcmcia_device *link)
540{ 607{
541 struct net_device *dev = link->priv; 608 struct net_device *dev = link->priv;
542 pcnet_dev_t *info = PRIV(dev); 609 pcnet_dev_t *info = PRIV(dev);
543 int ret, start_pg, stop_pg, cm_offset; 610 int start_pg, stop_pg, cm_offset;
544 int has_shmem = 0; 611 int has_shmem = 0;
545 hw_info_t *local_hw_info; 612 hw_info_t *local_hw_info;
546 613
547 dev_dbg(&link->dev, "pcnet_config\n"); 614 dev_dbg(&link->dev, "pcnet_config\n");
548 615
549 ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem); 616 local_hw_info = pcnet_try_config(link, &has_shmem, 0);
550 if (ret) 617 if (!local_hw_info) {
551 goto failed; 618 /* check whether forcing io_lines to 16 helps... */
552 619 pcmcia_disable_device(link);
553 if (!link->irq) 620 local_hw_info = pcnet_try_config(link, &has_shmem, 1);
554 goto failed; 621 if (local_hw_info == NULL) {
555 622 dev_notice(&link->dev, "unable to read hardware net"
556 if (resource_size(link->resource[1]) == 8) { 623 " address for io base %#3lx\n", dev->base_addr);
557 link->conf.Attributes |= CONF_ENABLE_SPKR; 624 goto failed;
558 link->conf.Status = CCSR_AUDIO_ENA; 625 }
559 }
560 if ((link->manf_id == MANFID_IBM) &&
561 (link->card_id == PRODID_IBM_HOME_AND_AWAY))
562 link->conf.ConfigIndex |= 0x10;
563
564 ret = pcmcia_request_configuration(link, &link->conf);
565 if (ret)
566 goto failed;
567 dev->irq = link->irq;
568 dev->base_addr = link->resource[0]->start;
569 if (info->flags & HAS_MISC_REG) {
570 if ((if_port == 1) || (if_port == 2))
571 dev->if_port = if_port;
572 else
573 printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n");
574 } else {
575 dev->if_port = 0;
576 }
577
578 if ((link->conf.ConfigBase == 0x03c0) &&
579 (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
580 printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n");
581 printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n");
582 goto failed;
583 }
584
585 local_hw_info = get_hwinfo(link);
586 if (local_hw_info == NULL)
587 local_hw_info = get_prom(link);
588 if (local_hw_info == NULL)
589 local_hw_info = get_dl10019(link);
590 if (local_hw_info == NULL)
591 local_hw_info = get_ax88190(link);
592 if (local_hw_info == NULL)
593 local_hw_info = get_hwired(link);
594
595 if (local_hw_info == NULL) {
596 printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
597 " address for io base %#3lx\n", dev->base_addr);
598 goto failed;
599 } 626 }
600 627
601 info->flags = local_hw_info->flags; 628 info->flags = local_hw_info->flags;
@@ -1637,6 +1664,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1637 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b), 1664 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b),
1638 PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0), 1665 PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0),
1639 PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956), 1666 PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956),
1667 PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616),
1640 PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64), 1668 PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64),
1641 PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5), 1669 PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5),
1642 PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3), 1670 PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3),
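
/*
 * Sketch, not part of the patch: the two-pass shape pcnet_config() now has.
 * All of the probing lives in pcnet_try_config(); the caller tries the normal
 * configuration first and, only if that fails to yield usable hardware info,
 * disables the device and retries with io_lines forced to 16.
 */
static hw_info_t *example_config(struct pcmcia_device *link, int *has_shmem)
{
	hw_info_t *hw = pcnet_try_config(link, has_shmem, 0);

	if (!hw) {
		pcmcia_disable_device(link);
		hw = pcnet_try_config(link, has_shmem, 1);
	}

	return hw;
}
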
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 6a6b8199a0d6..6c58da2b882c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -308,7 +308,7 @@ static int mdio_bus_suspend(struct device *dev)
308 * may call phy routines that try to grab the same lock, and that may 308 * may call phy routines that try to grab the same lock, and that may
309 * lead to a deadlock. 309 * lead to a deadlock.
310 */ 310 */
311 if (phydev->attached_dev) 311 if (phydev->attached_dev && phydev->adjust_link)
312 phy_stop_machine(phydev); 312 phy_stop_machine(phydev);
313 313
314 if (!mdio_bus_phy_may_suspend(phydev)) 314 if (!mdio_bus_phy_may_suspend(phydev))
@@ -331,7 +331,7 @@ static int mdio_bus_resume(struct device *dev)
331 return ret; 331 return ret;
332 332
333no_resume: 333no_resume:
334 if (phydev->attached_dev) 334 if (phydev->attached_dev && phydev->adjust_link)
335 phy_start_machine(phydev, NULL); 335 phy_start_machine(phydev, NULL);
336 336
337 return 0; 337 return 0;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c0761197c07e..16ddc77313cb 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -466,6 +466,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
466 466
467 phydev->interface = interface; 467 phydev->interface = interface;
468 468
469 phydev->state = PHY_READY;
470
469 /* Do initial configuration here, now that 471 /* Do initial configuration here, now that
470 * we have certain key parameters 472 * we have certain key parameters
471 * (dev_flags and interface) */ 473 * (dev_flags and interface) */
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6695a51e09e9..736b91703b3e 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1314,8 +1314,13 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1314 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1314 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
1315 i = 0; 1315 i = 0;
1316 list_for_each_entry(pch, &ppp->channels, clist) { 1316 list_for_each_entry(pch, &ppp->channels, clist) {
1317 navail += pch->avail = (pch->chan != NULL); 1317 if (pch->chan) {
1318 pch->speed = pch->chan->speed; 1318 pch->avail = 1;
1319 navail++;
1320 pch->speed = pch->chan->speed;
1321 } else {
1322 pch->avail = 0;
1323 }
1319 if (pch->avail) { 1324 if (pch->avail) {
1320 if (skb_queue_empty(&pch->file.xq) || 1325 if (skb_queue_empty(&pch->file.xq) ||
1321 !pch->had_frag) { 1326 !pch->had_frag) {
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
new file mode 100644
index 000000000000..85eddda276bd
--- /dev/null
+++ b/drivers/net/pxa168_eth.c
@@ -0,0 +1,1666 @@
1/*
2 * PXA168 ethernet driver.
3 * Most of the code is derived from mv643xx ethernet driver.
4 *
5 * Copyright (C) 2010 Marvell International Ltd.
6 * Sachin Sanap <ssanap@marvell.com>
7 * Philip Rakity <prakity@marvell.com>
8 * Mark Brown <markb@marvell.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 */
24
25#include <linux/init.h>
26#include <linux/dma-mapping.h>
27#include <linux/in.h>
28#include <linux/ip.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/etherdevice.h>
32#include <linux/bitops.h>
33#include <linux/delay.h>
34#include <linux/ethtool.h>
35#include <linux/platform_device.h>
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/workqueue.h>
39#include <linux/clk.h>
40#include <linux/phy.h>
41#include <linux/io.h>
42#include <linux/types.h>
43#include <asm/pgtable.h>
44#include <asm/system.h>
45#include <linux/delay.h>
46#include <linux/dma-mapping.h>
47#include <asm/cacheflush.h>
48#include <linux/pxa168_eth.h>
49
50#define DRIVER_NAME "pxa168-eth"
51#define DRIVER_VERSION "0.3"
52
53/*
54 * Registers
55 */
56
57#define PHY_ADDRESS 0x0000
58#define SMI 0x0010
59#define PORT_CONFIG 0x0400
60#define PORT_CONFIG_EXT 0x0408
61#define PORT_COMMAND 0x0410
62#define PORT_STATUS 0x0418
63#define HTPR 0x0428
64#define SDMA_CONFIG 0x0440
65#define SDMA_CMD 0x0448
66#define INT_CAUSE 0x0450
67#define INT_W_CLEAR 0x0454
68#define INT_MASK 0x0458
69#define ETH_F_RX_DESC_0 0x0480
70#define ETH_C_RX_DESC_0 0x04A0
71#define ETH_C_TX_DESC_1 0x04E4
72
73/* smi register */
74#define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */
75#define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */
76#define SMI_OP_W (0 << 26) /* Write operation */
77#define SMI_OP_R (1 << 26) /* Read operation */
78
79#define PHY_WAIT_ITERATIONS 10
80
81#define PXA168_ETH_PHY_ADDR_DEFAULT 0
82/* RX & TX descriptor command */
83#define BUF_OWNED_BY_DMA (1 << 31)
84
85/* RX descriptor status */
86#define RX_EN_INT (1 << 23)
87#define RX_FIRST_DESC (1 << 17)
88#define RX_LAST_DESC (1 << 16)
89#define RX_ERROR (1 << 15)
90
91/* TX descriptor command */
92#define TX_EN_INT (1 << 23)
93#define TX_GEN_CRC (1 << 22)
94#define TX_ZERO_PADDING (1 << 18)
95#define TX_FIRST_DESC (1 << 17)
96#define TX_LAST_DESC (1 << 16)
97#define TX_ERROR (1 << 15)
98
99/* SDMA_CMD */
100#define SDMA_CMD_AT (1 << 31)
101#define SDMA_CMD_TXDL (1 << 24)
102#define SDMA_CMD_TXDH (1 << 23)
103#define SDMA_CMD_AR (1 << 15)
104#define SDMA_CMD_ERD (1 << 7)
105
106/* Bit definitions of the Port Config Reg */
107#define PCR_HS (1 << 12)
108#define PCR_EN (1 << 7)
109#define PCR_PM (1 << 0)
110
111/* Bit definitions of the Port Config Extend Reg */
112#define PCXR_2BSM (1 << 28)
113#define PCXR_DSCP_EN (1 << 21)
114#define PCXR_MFL_1518 (0 << 14)
115#define PCXR_MFL_1536 (1 << 14)
116#define PCXR_MFL_2048 (2 << 14)
117#define PCXR_MFL_64K (3 << 14)
118#define PCXR_FLP (1 << 11)
119#define PCXR_PRIO_TX_OFF 3
120#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF)
121
122/* Bit definitions of the SDMA Config Reg */
123#define SDCR_BSZ_OFF 12
124#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF)
125#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF)
126#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF)
127#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF)
128#define SDCR_BLMR (1 << 6)
129#define SDCR_BLMT (1 << 7)
130#define SDCR_RIFB (1 << 9)
131#define SDCR_RC_OFF 2
132#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF)
133
134/*
135 * Bit definitions of the Interrupt Cause Reg
136 * and Interrupt MASK Reg is the same
137 */
138#define ICR_RXBUF (1 << 0)
139#define ICR_TXBUF_H (1 << 2)
140#define ICR_TXBUF_L (1 << 3)
141#define ICR_TXEND_H (1 << 6)
142#define ICR_TXEND_L (1 << 7)
143#define ICR_RXERR (1 << 8)
144#define ICR_TXERR_H (1 << 10)
145#define ICR_TXERR_L (1 << 11)
146#define ICR_TX_UDR (1 << 13)
147#define ICR_MII_CH (1 << 28)
148
149#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
150 ICR_TXERR_H | ICR_TXERR_L |\
151 ICR_TXEND_H | ICR_TXEND_L |\
152 ICR_RXBUF | ICR_RXERR | ICR_MII_CH)
153
154#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
155
156#define NUM_RX_DESCS 64
157#define NUM_TX_DESCS 64
158
159#define HASH_ADD 0
160#define HASH_DELETE 1
161#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */
162#define HOP_NUMBER 12
163
164/* Bit definitions for Port status */
165#define PORT_SPEED_100 (1 << 0)
166#define FULL_DUPLEX (1 << 1)
167#define FLOW_CONTROL_ENABLED (1 << 2)
168#define LINK_UP (1 << 3)
169
170/* Bit definitions for work to be done */
171#define WORK_LINK (1 << 0)
172#define WORK_TX_DONE (1 << 1)
173
174/*
175 * Misc definitions.
176 */
177#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
178
179struct rx_desc {
180 u32 cmd_sts; /* Descriptor command status */
181 u16 byte_cnt; /* Descriptor buffer byte count */
182 u16 buf_size; /* Buffer size */
183 u32 buf_ptr; /* Descriptor buffer pointer */
184 u32 next_desc_ptr; /* Next descriptor pointer */
185};
186
187struct tx_desc {
188 u32 cmd_sts; /* Command/status field */
189 u16 reserved;
190 u16 byte_cnt; /* buffer byte count */
191 u32 buf_ptr; /* pointer to buffer for this descriptor */
192 u32 next_desc_ptr; /* Pointer to next descriptor */
193};
194
195struct pxa168_eth_private {
196 int port_num; /* User Ethernet port number */
197
198 int rx_resource_err; /* Rx ring resource error flag */
199
200 /* Next available and first returning Rx resource */
201 int rx_curr_desc_q, rx_used_desc_q;
202
203 /* Next available and first returning Tx resource */
204 int tx_curr_desc_q, tx_used_desc_q;
205
206 struct rx_desc *p_rx_desc_area;
207 dma_addr_t rx_desc_dma;
208 int rx_desc_area_size;
209 struct sk_buff **rx_skb;
210
211 struct tx_desc *p_tx_desc_area;
212 dma_addr_t tx_desc_dma;
213 int tx_desc_area_size;
214 struct sk_buff **tx_skb;
215
216 struct work_struct tx_timeout_task;
217
218 struct net_device *dev;
219 struct napi_struct napi;
220 u8 work_todo;
221 int skb_size;
222
223 struct net_device_stats stats;
224 /* Size of Tx Ring per queue */
225 int tx_ring_size;
226 /* Number of tx descriptors in use */
227 int tx_desc_count;
228 /* Size of Rx Ring per queue */
229 int rx_ring_size;
230 /* Number of rx descriptors in use */
231 int rx_desc_count;
232
233 /*
234 * Used in case RX Ring is empty, which can occur when
235 * system does not have resources (skb's)
236 */
237 struct timer_list timeout;
238 struct mii_bus *smi_bus;
239 struct phy_device *phy;
240
241 /* clock */
242 struct clk *clk;
243 struct pxa168_eth_platform_data *pd;
244 /*
245 * Ethernet controller base address.
246 */
247 void __iomem *base;
248
249 /* Pointer to the hardware address filter table */
250 void *htpr;
251 dma_addr_t htpr_dma;
252};
253
254struct addr_table_entry {
255 __le32 lo;
256 __le32 hi;
257};
258
259/* Bit fields of a Hash Table Entry */
260enum hash_table_entry {
261 HASH_ENTRY_VALID = 1,
262 SKIP = 2,
263 HASH_ENTRY_RECEIVE_DISCARD = 4,
264 HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
265};
266
267static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
268static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
269static int pxa168_init_hw(struct pxa168_eth_private *pep);
270static void eth_port_reset(struct net_device *dev);
271static void eth_port_start(struct net_device *dev);
272static int pxa168_eth_open(struct net_device *dev);
273static int pxa168_eth_stop(struct net_device *dev);
274static int ethernet_phy_setup(struct net_device *dev);
275
276static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
277{
278 return readl(pep->base + offset);
279}
280
281static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
282{
283 writel(data, pep->base + offset);
284}
285
286static void abort_dma(struct pxa168_eth_private *pep)
287{
288 int delay;
289 int max_retries = 40;
290
291 do {
292 wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
293 udelay(100);
294
295 delay = 10;
296 while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
297 && delay-- > 0) {
298 udelay(10);
299 }
300 } while (max_retries-- > 0 && delay <= 0);
301
302 if (max_retries <= 0)
303 printk(KERN_ERR "%s : DMA Stuck\n", __func__);
304}
305
306static int ethernet_phy_get(struct pxa168_eth_private *pep)
307{
308 unsigned int reg_data;
309
310 reg_data = rdl(pep, PHY_ADDRESS);
311
312 return (reg_data >> (5 * pep->port_num)) & 0x1f;
313}
314
315static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
316{
317 u32 reg_data;
318 int addr_shift = 5 * pep->port_num;
319
320 reg_data = rdl(pep, PHY_ADDRESS);
321 reg_data &= ~(0x1f << addr_shift);
322 reg_data |= (phy_addr & 0x1f) << addr_shift;
323 wrl(pep, PHY_ADDRESS, reg_data);
324}
325
326static void ethernet_phy_reset(struct pxa168_eth_private *pep)
327{
328 int data;
329
330 data = phy_read(pep->phy, MII_BMCR);
331 if (data < 0)
332 return;
333
334 data |= BMCR_RESET;
335 if (phy_write(pep->phy, MII_BMCR, data) < 0)
336 return;
337
338 do {
339 data = phy_read(pep->phy, MII_BMCR);
340 } while (data >= 0 && data & BMCR_RESET);
341}
342
343static void rxq_refill(struct net_device *dev)
344{
345 struct pxa168_eth_private *pep = netdev_priv(dev);
346 struct sk_buff *skb;
347 struct rx_desc *p_used_rx_desc;
348 int used_rx_desc;
349
350 while (pep->rx_desc_count < pep->rx_ring_size) {
351 int size;
352
353 skb = dev_alloc_skb(pep->skb_size);
354 if (!skb)
355 break;
356 if (SKB_DMA_REALIGN)
357 skb_reserve(skb, SKB_DMA_REALIGN);
358 pep->rx_desc_count++;
359 /* Get 'used' Rx descriptor */
360 used_rx_desc = pep->rx_used_desc_q;
361 p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
362 size = skb->end - skb->data;
363 p_used_rx_desc->buf_ptr = dma_map_single(NULL,
364 skb->data,
365 size,
366 DMA_FROM_DEVICE);
367 p_used_rx_desc->buf_size = size;
368 pep->rx_skb[used_rx_desc] = skb;
369
370 /* Return the descriptor to DMA ownership */
371 wmb();
372 p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
373 wmb();
374
375 /* Move the used descriptor pointer to the next descriptor */
376 pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
377
378 /* Any Rx return cancels the Rx resource error status */
379 pep->rx_resource_err = 0;
380
381 skb_reserve(skb, ETH_HW_IP_ALIGN);
382 }
383
384 /*
385 * If the RX ring has run out of skbs, set a timer to try allocating
386 * again at a later time.
387 */
388 if (pep->rx_desc_count == 0) {
389 pep->timeout.expires = jiffies + (HZ / 10);
390 add_timer(&pep->timeout);
391 }
392}
393
394static inline void rxq_refill_timer_wrapper(unsigned long data)
395{
396 struct pxa168_eth_private *pep = (void *)data;
397 napi_schedule(&pep->napi);
398}
399
400static inline u8 flip_8_bits(u8 x)
401{
402 return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
403 | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
404 | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
405 | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
406}
407
408static void nibble_swap_every_byte(unsigned char *mac_addr)
409{
410 int i;
411 for (i = 0; i < ETH_ALEN; i++) {
412 mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
413 ((mac_addr[i] & 0xf0) >> 4);
414 }
415}
416
417static void inverse_every_nibble(unsigned char *mac_addr)
418{
419 int i;
420 for (i = 0; i < ETH_ALEN; i++)
421 mac_addr[i] = flip_8_bits(mac_addr[i]);
422}
423
424/*
425 * ----------------------------------------------------------------------------
426 * This function will calculate the hash function of the address.
427 * Inputs
428 * mac_addr_orig - MAC address.
429 * Outputs
430 * return the calculated entry.
431 */
432static u32 hash_function(unsigned char *mac_addr_orig)
433{
434 u32 hash_result;
435 u32 addr0;
436 u32 addr1;
437 u32 addr2;
438 u32 addr3;
439 unsigned char mac_addr[ETH_ALEN];
440
441 /* Make a copy of the MAC address since we are going to perform bit
442 * operations on it
443 */
444 memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
445
446 nibble_swap_every_byte(mac_addr);
447 inverse_every_nibble(mac_addr);
448
449 addr0 = (mac_addr[5] >> 2) & 0x3f;
450 addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
451 addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
452 addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
453
454 hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
455 hash_result = hash_result & 0x07ff;
456 return hash_result;
457}
458
459/*
460 * ----------------------------------------------------------------------------
461 * This function will add/del an entry to the address table.
462 * Inputs
463 * pep - ETHERNET port private data.
464 * mac_addr - MAC address.
465 * skip - if 1, skip this address. Used when deleting an entry that is part
466 * of a chain in the hash table. We can't just delete the entry, since
467 * that would break the chain; the tables need to be defragmented from
468 * time to time.
469 * rd - 0 Discard packet upon match.
470 * - 1 Receive packet upon match.
471 * Outputs
472 * address table entry is added/deleted.
473 * 0 if success.
474 * -ENOSPC if table full
475 */
476static int add_del_hash_entry(struct pxa168_eth_private *pep,
477 unsigned char *mac_addr,
478 u32 rd, u32 skip, int del)
479{
480 struct addr_table_entry *entry, *start;
481 u32 new_high;
482 u32 new_low;
483 u32 i;
484
485 new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
486 | (((mac_addr[1] >> 0) & 0xf) << 11)
487 | (((mac_addr[0] >> 4) & 0xf) << 7)
488 | (((mac_addr[0] >> 0) & 0xf) << 3)
489 | (((mac_addr[3] >> 4) & 0x1) << 31)
490 | (((mac_addr[3] >> 0) & 0xf) << 27)
491 | (((mac_addr[2] >> 4) & 0xf) << 23)
492 | (((mac_addr[2] >> 0) & 0xf) << 19)
493 | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
494 | HASH_ENTRY_VALID;
495
496 new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
497 | (((mac_addr[5] >> 0) & 0xf) << 11)
498 | (((mac_addr[4] >> 4) & 0xf) << 7)
499 | (((mac_addr[4] >> 0) & 0xf) << 3)
500 | (((mac_addr[3] >> 5) & 0x7) << 0);
501
502 /*
503 * Pick the appropriate table, start scanning for free/reusable
504 * entries at the index obtained by hashing the specified MAC address
505 */
506 start = (struct addr_table_entry *)(pep->htpr);
507 entry = start + hash_function(mac_addr);
508 for (i = 0; i < HOP_NUMBER; i++) {
509 if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
510 break;
511 } else {
512 /* if same address put in same position */
513 if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
514 (new_low & 0xfffffff8)) &&
515 (le32_to_cpu(entry->hi) == new_high)) {
516 break;
517 }
518 }
519 if (entry == start + 0x7ff)
520 entry = start;
521 else
522 entry++;
523 }
524
525 if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
526 (le32_to_cpu(entry->hi) != new_high) && del)
527 return 0;
528
529 if (i == HOP_NUMBER) {
530 if (!del) {
531 printk(KERN_INFO "%s: table section is full, need to "
532 "move to 16kB implementation?\n",
533 __FILE__);
534 return -ENOSPC;
535 } else
536 return 0;
537 }
538
539 /*
540 * Update the selected entry
541 */
542 if (del) {
543 entry->hi = 0;
544 entry->lo = 0;
545 } else {
546 entry->hi = cpu_to_le32(new_high);
547 entry->lo = cpu_to_le32(new_low);
548 }
549
550 return 0;
551}
552
553/*
554 * ----------------------------------------------------------------------------
555 * Create an address table entry from the MAC address info
556 * found in the specified net_device struct
557 *
558 * Input : pointer to ethernet interface network device structure
559 * Output : N/A
560 */
561static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
562 unsigned char *oaddr,
563 unsigned char *addr)
564{
565 /* Delete old entry */
566 if (oaddr)
567 add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
568 /* Add new entry */
569 add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
570}
571
572static int init_hash_table(struct pxa168_eth_private *pep)
573{
574 /*
575 * Hardware expects CPU to build a hash table based on a predefined
576 * hash function and populate it based on hardware address. The
577 * location of the hash table is identified by a 32-bit pointer stored
578 * in the HTPR internal register. Two possible sizes exist for the hash
579 * table: 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB
580 * (16kB of DRAM required (4 x 4 kB banks)). We currently only support
581 * the 1/2kB table.
582 */
583 /* TODO: Add support for the 8kB hash table and an alternative hash
584 * function. The driver can dynamically switch to them if the 1/2kB hash
585 * table is full.
586 */
587 if (pep->htpr == NULL) {
588 pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
589 HASH_ADDR_TABLE_SIZE,
590 &pep->htpr_dma, GFP_KERNEL);
591 if (pep->htpr == NULL)
592 return -ENOMEM;
593 }
594 memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
595 wrl(pep, HTPR, pep->htpr_dma);
596 return 0;
597}
598
599static void pxa168_eth_set_rx_mode(struct net_device *dev)
600{
601 struct pxa168_eth_private *pep = netdev_priv(dev);
602 struct netdev_hw_addr *ha;
603 u32 val;
604
605 val = rdl(pep, PORT_CONFIG);
606 if (dev->flags & IFF_PROMISC)
607 val |= PCR_PM;
608 else
609 val &= ~PCR_PM;
610 wrl(pep, PORT_CONFIG, val);
611
612 /*
613 * Remove the old list of MAC address and add dev->addr
614 * and multicast address.
615 */
616 memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
617 update_hash_table_mac_address(pep, NULL, dev->dev_addr);
618
619 netdev_for_each_mc_addr(ha, dev)
620 update_hash_table_mac_address(pep, NULL, ha->addr);
621}
622
623static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
624{
625 struct sockaddr *sa = addr;
626 struct pxa168_eth_private *pep = netdev_priv(dev);
627 unsigned char oldMac[ETH_ALEN];
628
629 if (!is_valid_ether_addr(sa->sa_data))
630 return -EINVAL;
631 memcpy(oldMac, dev->dev_addr, ETH_ALEN);
632 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
633 netif_addr_lock_bh(dev);
634 update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
635 netif_addr_unlock_bh(dev);
636 return 0;
637}
638
639static void eth_port_start(struct net_device *dev)
640{
641 unsigned int val = 0;
642 struct pxa168_eth_private *pep = netdev_priv(dev);
643 int tx_curr_desc, rx_curr_desc;
644
645 /* Perform PHY reset, if there is a PHY. */
646 if (pep->phy != NULL) {
647 struct ethtool_cmd cmd;
648
649 pxa168_get_settings(pep->dev, &cmd);
650 ethernet_phy_reset(pep);
651 pxa168_set_settings(pep->dev, &cmd);
652 }
653
654 /* Assignment of Tx CTRP of given queue */
655 tx_curr_desc = pep->tx_curr_desc_q;
656 wrl(pep, ETH_C_TX_DESC_1,
657 (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
658
659 /* Assignment of Rx CRDP of given queue */
660 rx_curr_desc = pep->rx_curr_desc_q;
661 wrl(pep, ETH_C_RX_DESC_0,
662 (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
663
664 wrl(pep, ETH_F_RX_DESC_0,
665 (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
666
667 /* Clear all interrupts */
668 wrl(pep, INT_CAUSE, 0);
669
670 /* Enable all interrupts for receive, transmit and error. */
671 wrl(pep, INT_MASK, ALL_INTS);
672
673 val = rdl(pep, PORT_CONFIG);
674 val |= PCR_EN;
675 wrl(pep, PORT_CONFIG, val);
676
677 /* Start RX DMA engine */
678 val = rdl(pep, SDMA_CMD);
679 val |= SDMA_CMD_ERD;
680 wrl(pep, SDMA_CMD, val);
681}
682
683static void eth_port_reset(struct net_device *dev)
684{
685 struct pxa168_eth_private *pep = netdev_priv(dev);
686 unsigned int val = 0;
687
688 /* Stop all interrupts for receive, transmit and error. */
689 wrl(pep, INT_MASK, 0);
690
691 /* Clear all interrupts */
692 wrl(pep, INT_CAUSE, 0);
693
694 /* Stop RX DMA */
695 val = rdl(pep, SDMA_CMD);
696 val &= ~SDMA_CMD_ERD; /* abort dma command */
697
698 /* Abort any transmit and receive operations and put DMA
699 * in idle state.
700 */
701 abort_dma(pep);
702
703 /* Disable port */
704 val = rdl(pep, PORT_CONFIG);
705 val &= ~PCR_EN;
706 wrl(pep, PORT_CONFIG, val);
707}
708
709/*
710 * txq_reclaim - Free the tx desc data for completed descriptors
711 * If force is non-zero, frees uncompleted descriptors as well
712 */
713static int txq_reclaim(struct net_device *dev, int force)
714{
715 struct pxa168_eth_private *pep = netdev_priv(dev);
716 struct tx_desc *desc;
717 u32 cmd_sts;
718 struct sk_buff *skb;
719 int tx_index;
720 dma_addr_t addr;
721 int count;
722 int released = 0;
723
724 netif_tx_lock(dev);
725
726 pep->work_todo &= ~WORK_TX_DONE;
727 while (pep->tx_desc_count > 0) {
728 tx_index = pep->tx_used_desc_q;
729 desc = &pep->p_tx_desc_area[tx_index];
730 cmd_sts = desc->cmd_sts;
731 if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
732 if (released > 0) {
733 goto txq_reclaim_end;
734 } else {
735 released = -1;
736 goto txq_reclaim_end;
737 }
738 }
739 pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
740 pep->tx_desc_count--;
741 addr = desc->buf_ptr;
742 count = desc->byte_cnt;
743 skb = pep->tx_skb[tx_index];
744 if (skb)
745 pep->tx_skb[tx_index] = NULL;
746
747 if (cmd_sts & TX_ERROR) {
748 if (net_ratelimit())
749 printk(KERN_ERR "%s: Error in TX\n", dev->name);
750 dev->stats.tx_errors++;
751 }
752 dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
753 if (skb)
754 dev_kfree_skb_irq(skb);
755 released++;
756 }
757txq_reclaim_end:
758 netif_tx_unlock(dev);
759 return released;
760}
761
762static void pxa168_eth_tx_timeout(struct net_device *dev)
763{
764 struct pxa168_eth_private *pep = netdev_priv(dev);
765
766 printk(KERN_INFO "%s: TX timeout desc_count %d\n",
767 dev->name, pep->tx_desc_count);
768
769 schedule_work(&pep->tx_timeout_task);
770}
771
772static void pxa168_eth_tx_timeout_task(struct work_struct *work)
773{
774 struct pxa168_eth_private *pep = container_of(work,
775 struct pxa168_eth_private,
776 tx_timeout_task);
777 struct net_device *dev = pep->dev;
778 pxa168_eth_stop(dev);
779 pxa168_eth_open(dev);
780}
781
782static int rxq_process(struct net_device *dev, int budget)
783{
784 struct pxa168_eth_private *pep = netdev_priv(dev);
785 struct net_device_stats *stats = &dev->stats;
786 unsigned int received_packets = 0;
787 struct sk_buff *skb;
788
789 while (budget-- > 0) {
790 int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
791 struct rx_desc *rx_desc;
792 unsigned int cmd_sts;
793
794 /* Do not process Rx ring in case of Rx ring resource error */
795 if (pep->rx_resource_err)
796 break;
797 rx_curr_desc = pep->rx_curr_desc_q;
798 rx_used_desc = pep->rx_used_desc_q;
799 rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
800 cmd_sts = rx_desc->cmd_sts;
801 rmb();
802 if (cmd_sts & (BUF_OWNED_BY_DMA))
803 break;
804 skb = pep->rx_skb[rx_curr_desc];
805 pep->rx_skb[rx_curr_desc] = NULL;
806
807 rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
808 pep->rx_curr_desc_q = rx_next_curr_desc;
809
810 /* Rx descriptors exhausted. */
811 /* Set the Rx ring resource error flag */
812 if (rx_next_curr_desc == rx_used_desc)
813 pep->rx_resource_err = 1;
814 pep->rx_desc_count--;
815 dma_unmap_single(NULL, rx_desc->buf_ptr,
816 rx_desc->buf_size,
817 DMA_FROM_DEVICE);
818 received_packets++;
819 /*
820 * Update statistics.
821 * Note: the byte count includes the 4-byte CRC.
822 */
823 stats->rx_packets++;
824 stats->rx_bytes += rx_desc->byte_cnt;
825 /*
826 * If a packet was received without the first/last descriptor bits set,
827 * or with the error summary bit set, the packet needs to be dropped.
828 */
829 if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
830 (RX_FIRST_DESC | RX_LAST_DESC))
831 || (cmd_sts & RX_ERROR)) {
832
833 stats->rx_dropped++;
834 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
835 (RX_FIRST_DESC | RX_LAST_DESC)) {
836 if (net_ratelimit())
837 printk(KERN_ERR
838 "%s: Rx pkt on multiple desc\n",
839 dev->name);
840 }
841 if (cmd_sts & RX_ERROR)
842 stats->rx_errors++;
843 dev_kfree_skb_irq(skb);
844 } else {
845 /*
846 * The -4 is for the CRC in the trailer of the
847 * received packet
848 */
849 skb_put(skb, rx_desc->byte_cnt - 4);
850 skb->protocol = eth_type_trans(skb, dev);
851 netif_receive_skb(skb);
852 }
853 dev->last_rx = jiffies;
854 }
855 /* Fill RX ring with skb's */
856 rxq_refill(dev);
857 return received_packets;
858}
859
860static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
861 struct net_device *dev)
862{
863 u32 icr;
864 int ret = 0;
865
866 icr = rdl(pep, INT_CAUSE);
867 if (icr == 0)
868 return IRQ_NONE;
869
870 wrl(pep, INT_CAUSE, ~icr);
871 if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
872 pep->work_todo |= WORK_TX_DONE;
873 ret = 1;
874 }
875 if (icr & ICR_RXBUF)
876 ret = 1;
877 if (icr & ICR_MII_CH) {
878 pep->work_todo |= WORK_LINK;
879 ret = 1;
880 }
881 return ret;
882}
883
884static void handle_link_event(struct pxa168_eth_private *pep)
885{
886 struct net_device *dev = pep->dev;
887 u32 port_status;
888 int speed;
889 int duplex;
890 int fc;
891
892 port_status = rdl(pep, PORT_STATUS);
893 if (!(port_status & LINK_UP)) {
894 if (netif_carrier_ok(dev)) {
895 printk(KERN_INFO "%s: link down\n", dev->name);
896 netif_carrier_off(dev);
897 txq_reclaim(dev, 1);
898 }
899 return;
900 }
901 if (port_status & PORT_SPEED_100)
902 speed = 100;
903 else
904 speed = 10;
905
906 duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
907 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
908 printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
909 "flow control %sabled\n", dev->name,
910 speed, duplex ? "full" : "half", fc ? "en" : "dis");
911 if (!netif_carrier_ok(dev))
912 netif_carrier_on(dev);
913}
914
915static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
916{
917 struct net_device *dev = (struct net_device *)dev_id;
918 struct pxa168_eth_private *pep = netdev_priv(dev);
919
920 if (unlikely(!pxa168_eth_collect_events(pep, dev)))
921 return IRQ_NONE;
922 /* Disable interrupts */
923 wrl(pep, INT_MASK, 0);
924 napi_schedule(&pep->napi);
925 return IRQ_HANDLED;
926}
927
928static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
929{
930 int skb_size;
931
932 /*
933 * Reserve 2+14 bytes for an ethernet header (the hardware
934 * automatically prepends 2 bytes of dummy data to each
935 * received packet), 16 bytes for up to four VLAN tags, and
936 * 4 bytes for the trailing FCS -- 36 bytes total.
937 */
938 skb_size = pep->dev->mtu + 36;
939
940 /*
941 * Make sure that the skb size is a multiple of 8 bytes, as
942 * the lower three bits of the receive descriptor's buffer
943 * size field are ignored by the hardware.
944 */
945 pep->skb_size = (skb_size + 7) & ~7;
946
947 /*
948 * If NET_SKB_PAD is smaller than a cache line,
949 * netdev_alloc_skb() will cause skb->data to be misaligned
950 * to a cache line boundary. If this is the case, include
951 * some extra space to allow re-aligning the data area.
952 */
953 pep->skb_size += SKB_DMA_REALIGN;
954
955}
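/*
 * Worked example (illustrative, not part of the patch): with the default
 * MTU of 1500, skb_size = 1500 + 36 = 1536, which is already a multiple
 * of 8, so (1536 + 7) & ~7 == 1536; with an MTU of 1522, skb_size = 1558
 * and the rounding yields 1560.  SKB_DMA_REALIGN is then added on top.
 */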
956
957static int set_port_config_ext(struct pxa168_eth_private *pep)
958{
959 int skb_size;
960
961 pxa168_eth_recalc_skb_size(pep);
962 if (pep->skb_size <= 1518)
963 skb_size = PCXR_MFL_1518;
964 else if (pep->skb_size <= 1536)
965 skb_size = PCXR_MFL_1536;
966 else if (pep->skb_size <= 2048)
967 skb_size = PCXR_MFL_2048;
968 else
969 skb_size = PCXR_MFL_64K;
970
971 /* Extended Port Configuration */
972 wrl(pep,
973 PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
974 PCXR_DSCP_EN | /* Enable DSCP in IP */
975 skb_size | PCXR_FLP | /* do not force link pass */
976 PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */
977
978 return 0;
979}
980
981static int pxa168_init_hw(struct pxa168_eth_private *pep)
982{
983 int err = 0;
984
985 /* Disable interrupts */
986 wrl(pep, INT_MASK, 0);
987 wrl(pep, INT_CAUSE, 0);
988 /* Write to ICR to clear interrupts. */
989 wrl(pep, INT_W_CLEAR, 0);
990 /* Abort any transmit and receive operations and put DMA
991 * in idle state.
992 */
993 abort_dma(pep);
994 /* Initialize address hash table */
995 err = init_hash_table(pep);
996 if (err)
997 return err;
998 /* SDMA configuration */
999 wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */
1000 SDCR_RIFB | /* Rx interrupt on frame */
1001 SDCR_BLMT | /* Little endian transmit */
1002 SDCR_BLMR | /* Little endian receive */
1003 SDCR_RC_MAX_RETRANS); /* Max retransmit count */
1004 /* Port Configuration */
1005 wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */
1006 set_port_config_ext(pep);
1007
1008 return err;
1009}
1010
1011static int rxq_init(struct net_device *dev)
1012{
1013 struct pxa168_eth_private *pep = netdev_priv(dev);
1014 struct rx_desc *p_rx_desc;
1015 int size = 0, i = 0;
1016 int rx_desc_num = pep->rx_ring_size;
1017
1018 /* Allocate RX skb rings */
1019 pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
1020 GFP_KERNEL);
1021 if (!pep->rx_skb) {
1022 printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
1023 return -ENOMEM;
1024 }
1025 /* Allocate RX ring */
1026 pep->rx_desc_count = 0;
1027 size = pep->rx_ring_size * sizeof(struct rx_desc);
1028 pep->rx_desc_area_size = size;
1029 pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1030 &pep->rx_desc_dma, GFP_KERNEL);
1031 if (!pep->p_rx_desc_area) {
1032 printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
1033 dev->name, size);
1034 goto out;
1035 }
1036 memset((void *)pep->p_rx_desc_area, 0, size);
1037 /* initialize the next_desc_ptr links in the Rx descriptors ring */
1038 p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
1039 for (i = 0; i < rx_desc_num; i++) {
1040 p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
1041 ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
1042 }
1043 /* Save Rx desc pointer to driver struct. */
1044 pep->rx_curr_desc_q = 0;
1045 pep->rx_used_desc_q = 0;
1046 pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
1047 return 0;
1048out:
1049 kfree(pep->rx_skb);
1050 return -ENOMEM;
1051}
1052
1053static void rxq_deinit(struct net_device *dev)
1054{
1055 struct pxa168_eth_private *pep = netdev_priv(dev);
1056 int curr;
1057
1058 /* Free preallocated skb's on RX rings */
1059 for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
1060 if (pep->rx_skb[curr]) {
1061 dev_kfree_skb(pep->rx_skb[curr]);
1062 pep->rx_desc_count--;
1063 }
1064 }
1065 if (pep->rx_desc_count)
1066 printk(KERN_ERR
1067 "Error in freeing Rx Ring. %d skb's still\n",
1068 pep->rx_desc_count);
1069 /* Free RX ring */
1070 if (pep->p_rx_desc_area)
1071 dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
1072 pep->p_rx_desc_area, pep->rx_desc_dma);
1073 kfree(pep->rx_skb);
1074}
1075
1076static int txq_init(struct net_device *dev)
1077{
1078 struct pxa168_eth_private *pep = netdev_priv(dev);
1079 struct tx_desc *p_tx_desc;
1080 int size = 0, i = 0;
1081 int tx_desc_num = pep->tx_ring_size;
1082
1083 pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
1084 GFP_KERNEL);
1085 if (!pep->tx_skb) {
1086 printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
1087 return -ENOMEM;
1088 }
1089 /* Allocate TX ring */
1090 pep->tx_desc_count = 0;
1091 size = pep->tx_ring_size * sizeof(struct tx_desc);
1092 pep->tx_desc_area_size = size;
1093 pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1094 &pep->tx_desc_dma, GFP_KERNEL);
1095 if (!pep->p_tx_desc_area) {
1096 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
1097 dev->name, size);
1098 goto out;
1099 }
1100 memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
1101 /* Initialize the next_desc_ptr links in the Tx descriptors ring */
1102 p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
1103 for (i = 0; i < tx_desc_num; i++) {
1104 p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
1105 ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
1106 }
1107 pep->tx_curr_desc_q = 0;
1108 pep->tx_used_desc_q = 0;
1109 pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
1110 return 0;
1111out:
1112 kfree(pep->tx_skb);
1113 return -ENOMEM;
1114}
1115
1116static void txq_deinit(struct net_device *dev)
1117{
1118 struct pxa168_eth_private *pep = netdev_priv(dev);
1119
1120 /* Free outstanding skb's on TX ring */
1121 txq_reclaim(dev, 1);
1122 BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
1123 /* Free TX ring */
1124 if (pep->p_tx_desc_area)
1125 dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
1126 pep->p_tx_desc_area, pep->tx_desc_dma);
1127 kfree(pep->tx_skb);
1128}
1129
1130static int pxa168_eth_open(struct net_device *dev)
1131{
1132 struct pxa168_eth_private *pep = netdev_priv(dev);
1133 int err;
1134
1135 err = request_irq(dev->irq, pxa168_eth_int_handler,
1136 IRQF_DISABLED, dev->name, dev);
1137 if (err) {
1138 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
1139 return -EAGAIN;
1140 }
1141 pep->rx_resource_err = 0;
1142 err = rxq_init(dev);
1143 if (err != 0)
1144 goto out_free_irq;
1145 err = txq_init(dev);
1146 if (err != 0)
1147 goto out_free_rx_skb;
1148 pep->rx_used_desc_q = 0;
1149 pep->rx_curr_desc_q = 0;
1150
1151 /* Fill RX ring with skb's */
1152 rxq_refill(dev);
1153 pep->rx_used_desc_q = 0;
1154 pep->rx_curr_desc_q = 0;
1155 netif_carrier_off(dev);
1156 eth_port_start(dev);
1157 napi_enable(&pep->napi);
1158 return 0;
1159out_free_rx_skb:
1160 rxq_deinit(dev);
1161out_free_irq:
1162 free_irq(dev->irq, dev);
1163 return err;
1164}
1165
1166static int pxa168_eth_stop(struct net_device *dev)
1167{
1168 struct pxa168_eth_private *pep = netdev_priv(dev);
1169 eth_port_reset(dev);
1170
1171 /* Disable interrupts */
1172 wrl(pep, INT_MASK, 0);
1173 wrl(pep, INT_CAUSE, 0);
1174 /* Write to ICR to clear interrupts. */
1175 wrl(pep, INT_W_CLEAR, 0);
1176 napi_disable(&pep->napi);
1177 del_timer_sync(&pep->timeout);
1178 netif_carrier_off(dev);
1179 free_irq(dev->irq, dev);
1180 rxq_deinit(dev);
1181 txq_deinit(dev);
1182
1183 return 0;
1184}
1185
1186static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
1187{
1188 int retval;
1189 struct pxa168_eth_private *pep = netdev_priv(dev);
1190
1191 if ((mtu > 9500) || (mtu < 68))
1192 return -EINVAL;
1193
1194 dev->mtu = mtu;
1195 retval = set_port_config_ext(pep);
1196
1197 if (!netif_running(dev))
1198 return 0;
1199
1200 /*
1201 * Stop and then re-open the interface. This will allocate RX
1202 * skbs of the new MTU.
1203 * There is a risk that the re-open will not succeed if
1204 * memory is exhausted.
1205 */
1206 pxa168_eth_stop(dev);
1207 if (pxa168_eth_open(dev)) {
1208 dev_printk(KERN_ERR, &dev->dev,
1209 "fatal error on re-opening device after "
1210 "MTU change\n");
1211 }
1212
1213 return 0;
1214}
1215
1216static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
1217{
1218 int tx_desc_curr;
1219
1220 tx_desc_curr = pep->tx_curr_desc_q;
1221 pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
1222 BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
1223 pep->tx_desc_count++;
1224
1225 return tx_desc_curr;
1226}
1227
1228static int pxa168_rx_poll(struct napi_struct *napi, int budget)
1229{
1230 struct pxa168_eth_private *pep =
1231 container_of(napi, struct pxa168_eth_private, napi);
1232 struct net_device *dev = pep->dev;
1233 int work_done = 0;
1234
1235 if (unlikely(pep->work_todo & WORK_LINK)) {
1236 pep->work_todo &= ~(WORK_LINK);
1237 handle_link_event(pep);
1238 }
1239 /*
1240 * We call txq_reclaim on every poll, since while NAPI is running
1241 * interrupts are disabled and we would otherwise miss the TX_DONE
1242 * interrupt, which is not reflected in the interrupt status register.
1243 */
1244 txq_reclaim(dev, 0);
1245 if (netif_queue_stopped(dev)
1246 && pep->tx_ring_size - pep->tx_desc_count > 1) {
1247 netif_wake_queue(dev);
1248 }
1249 work_done = rxq_process(dev, budget);
1250 if (work_done < budget) {
1251 napi_complete(napi);
1252 wrl(pep, INT_MASK, ALL_INTS);
1253 }
1254
1255 return work_done;
1256}
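/*
 * Descriptive note on the interrupt/NAPI handshake used above:
 * pxa168_eth_int_handler() masks all sources (INT_MASK = 0) and schedules
 * NAPI; pxa168_rx_poll() reclaims TX, processes up to "budget" RX packets,
 * and only re-enables the sources (INT_MASK = ALL_INTS) after completing
 * under budget via napi_complete().
 */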
1257
1258static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1259{
1260 struct pxa168_eth_private *pep = netdev_priv(dev);
1261 struct net_device_stats *stats = &dev->stats;
1262 struct tx_desc *desc;
1263 int tx_index;
1264 int length;
1265
1266 tx_index = eth_alloc_tx_desc_index(pep);
1267 desc = &pep->p_tx_desc_area[tx_index];
1268 length = skb->len;
1269 pep->tx_skb[tx_index] = skb;
1270 desc->byte_cnt = length;
1271 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
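/* Ensure byte_cnt and buf_ptr are written before ownership passes to DMA */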
1272 wmb();
1273 desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
1274 TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
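/* Make the ownership transfer visible before ringing the TX doorbell */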
1275 wmb();
1276 wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
1277
1278 stats->tx_bytes += skb->len;
1279 stats->tx_packets++;
1280 dev->trans_start = jiffies;
1281 if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
1282 /* We handled the current skb, but now we are out of space.*/
1283 netif_stop_queue(dev);
1284 }
1285
1286 return NETDEV_TX_OK;
1287}
1288
1289static int smi_wait_ready(struct pxa168_eth_private *pep)
1290{
1291 int i = 0;
1292
1293 /* wait for the SMI register to become available */
1294 for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
1295 if (i == PHY_WAIT_ITERATIONS)
1296 return -ETIMEDOUT;
1297 msleep(10);
1298 }
1299
1300 return 0;
1301}
1302
1303static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
1304{
1305 struct pxa168_eth_private *pep = bus->priv;
1306 int i = 0;
1307 int val;
1308
1309 if (smi_wait_ready(pep)) {
1310 printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
1311 return -ETIMEDOUT;
1312 }
1313 wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
1314 /* now wait for the data to be valid */
1315 for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
1316 if (i == PHY_WAIT_ITERATIONS) {
1317 printk(KERN_WARNING
1318 "pxa168_eth: SMI bus read not valid\n");
1319 return -ENODEV;
1320 }
1321 msleep(10);
1322 }
1323
1324 return val & 0xffff;
1325}
1326
1327static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
1328 u16 value)
1329{
1330 struct pxa168_eth_private *pep = bus->priv;
1331
1332 if (smi_wait_ready(pep)) {
1333 printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
1334 return -ETIMEDOUT;
1335 }
1336
1337 wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
1338 SMI_OP_W | (value & 0xffff));
1339
1340 if (smi_wait_ready(pep)) {
1341 printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
1342 return -ETIMEDOUT;
1343 }
1344
1345 return 0;
1346}
1347
1348static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
1349 int cmd)
1350{
1351 struct pxa168_eth_private *pep = netdev_priv(dev);
1352 if (pep->phy != NULL)
1353 return phy_mii_ioctl(pep->phy, ifr, cmd);
1354
1355 return -EOPNOTSUPP;
1356}
1357
1358static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
1359{
1360 struct mii_bus *bus = pep->smi_bus;
1361 struct phy_device *phydev;
1362 int start;
1363 int num;
1364 int i;
1365
1366 if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
1367 /* Scan entire range */
1368 start = ethernet_phy_get(pep);
1369 num = 32;
1370 } else {
1371 /* Use phy addr specific to platform */
1372 start = phy_addr & 0x1f;
1373 num = 1;
1374 }
1375 phydev = NULL;
1376 for (i = 0; i < num; i++) {
1377 int addr = (start + i) & 0x1f;
1378 if (bus->phy_map[addr] == NULL)
1379 mdiobus_scan(bus, addr);
1380
1381 if (phydev == NULL) {
1382 phydev = bus->phy_map[addr];
1383 if (phydev != NULL)
1384 ethernet_phy_set_addr(pep, addr);
1385 }
1386 }
1387
1388 return phydev;
1389}
1390
1391static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
1392{
1393 struct phy_device *phy = pep->phy;
1394 ethernet_phy_reset(pep);
1395
1396 phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
1397
1398 if (speed == 0) {
1399 phy->autoneg = AUTONEG_ENABLE;
1400 phy->speed = 0;
1401 phy->duplex = 0;
1402 phy->supported &= PHY_BASIC_FEATURES;
1403 phy->advertising = phy->supported | ADVERTISED_Autoneg;
1404 } else {
1405 phy->autoneg = AUTONEG_DISABLE;
1406 phy->advertising = 0;
1407 phy->speed = speed;
1408 phy->duplex = duplex;
1409 }
1410 phy_start_aneg(phy);
1411}
1412
1413static int ethernet_phy_setup(struct net_device *dev)
1414{
1415 struct pxa168_eth_private *pep = netdev_priv(dev);
1416
1417 if (pep->pd->init)
1418 pep->pd->init();
1419 pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
1420 if (pep->phy != NULL)
1421 phy_init(pep, pep->pd->speed, pep->pd->duplex);
1422 update_hash_table_mac_address(pep, NULL, dev->dev_addr);
1423
1424 return 0;
1425}
1426
1427static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1428{
1429 struct pxa168_eth_private *pep = netdev_priv(dev);
1430 int err;
1431
1432 err = phy_read_status(pep->phy);
1433 if (err == 0)
1434 err = phy_ethtool_gset(pep->phy, cmd);
1435
1436 return err;
1437}
1438
1439static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1440{
1441 struct pxa168_eth_private *pep = netdev_priv(dev);
1442
1443 return phy_ethtool_sset(pep->phy, cmd);
1444}
1445
1446static void pxa168_get_drvinfo(struct net_device *dev,
1447 struct ethtool_drvinfo *info)
1448{
1449 strncpy(info->driver, DRIVER_NAME, 32);
1450 strncpy(info->version, DRIVER_VERSION, 32);
1451 strncpy(info->fw_version, "N/A", 32);
1452 strncpy(info->bus_info, "N/A", 32);
1453}
1454
1455static u32 pxa168_get_link(struct net_device *dev)
1456{
1457 return !!netif_carrier_ok(dev);
1458}
1459
1460static const struct ethtool_ops pxa168_ethtool_ops = {
1461 .get_settings = pxa168_get_settings,
1462 .set_settings = pxa168_set_settings,
1463 .get_drvinfo = pxa168_get_drvinfo,
1464 .get_link = pxa168_get_link,
1465};
1466
1467static const struct net_device_ops pxa168_eth_netdev_ops = {
1468 .ndo_open = pxa168_eth_open,
1469 .ndo_stop = pxa168_eth_stop,
1470 .ndo_start_xmit = pxa168_eth_start_xmit,
1471 .ndo_set_rx_mode = pxa168_eth_set_rx_mode,
1472 .ndo_set_mac_address = pxa168_eth_set_mac_address,
1473 .ndo_validate_addr = eth_validate_addr,
1474 .ndo_do_ioctl = pxa168_eth_do_ioctl,
1475 .ndo_change_mtu = pxa168_eth_change_mtu,
1476 .ndo_tx_timeout = pxa168_eth_tx_timeout,
1477};
1478
1479static int pxa168_eth_probe(struct platform_device *pdev)
1480{
1481 struct pxa168_eth_private *pep = NULL;
1482 struct net_device *dev = NULL;
1483 struct resource *res;
1484 struct clk *clk;
1485 int err;
1486
1487 printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
1488
1489 clk = clk_get(&pdev->dev, "MFUCLK");
1490 if (IS_ERR(clk)) {
1491 printk(KERN_ERR "%s: failed to get Fast Ethernet clock\n",
1492 DRIVER_NAME);
1493 return -ENODEV;
1494 }
1495 clk_enable(clk);
1496
1497 dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
1498 if (!dev) {
1499 err = -ENOMEM;
1500 goto err_clk;
1501 }
1502
1503 platform_set_drvdata(pdev, dev);
1504 pep = netdev_priv(dev);
1505 pep->dev = dev;
1506 pep->clk = clk;
1507 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1508 if (res == NULL) {
1509 err = -ENODEV;
1510 goto err_netdev;
1511 }
1512 pep->base = ioremap(res->start, res->end - res->start + 1);
1513 if (pep->base == NULL) {
1514 err = -ENOMEM;
1515 goto err_netdev;
1516 }
1517 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1518 BUG_ON(!res);
1519 dev->irq = res->start;
1520 dev->netdev_ops = &pxa168_eth_netdev_ops;
1521 dev->watchdog_timeo = 2 * HZ;
1522 dev->base_addr = 0;
1523 SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
1524
1525 INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
1526
1527 printk(KERN_INFO "%s: Using random MAC address\n", DRIVER_NAME);
1528 random_ether_addr(dev->dev_addr);
1529
1530 pep->pd = pdev->dev.platform_data;
1531 pep->rx_ring_size = NUM_RX_DESCS;
1532 if (pep->pd->rx_queue_size)
1533 pep->rx_ring_size = pep->pd->rx_queue_size;
1534
1535 pep->tx_ring_size = NUM_TX_DESCS;
1536 if (pep->pd->tx_queue_size)
1537 pep->tx_ring_size = pep->pd->tx_queue_size;
1538
1539 pep->port_num = pep->pd->port_number;
1540 /* Hardware supports only 3 ports */
1541 BUG_ON(pep->port_num > 2);
1542 netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
1543
1544 memset(&pep->timeout, 0, sizeof(struct timer_list));
1545 init_timer(&pep->timeout);
1546 pep->timeout.function = rxq_refill_timer_wrapper;
1547 pep->timeout.data = (unsigned long)pep;
1548
1549 pep->smi_bus = mdiobus_alloc();
1550 if (pep->smi_bus == NULL) {
1551 err = -ENOMEM;
1552 goto err_base;
1553 }
1554 pep->smi_bus->priv = pep;
1555 pep->smi_bus->name = "pxa168_eth smi";
1556 pep->smi_bus->read = pxa168_smi_read;
1557 pep->smi_bus->write = pxa168_smi_write;
1558 snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
1559 pep->smi_bus->parent = &pdev->dev;
1560 pep->smi_bus->phy_mask = 0xffffffff;
1561 err = mdiobus_register(pep->smi_bus);
1562 if (err)
1563 goto err_free_mdio;
1564
1565 pxa168_init_hw(pep);
1566 err = ethernet_phy_setup(dev);
1567 if (err)
1568 goto err_mdiobus;
1569 SET_NETDEV_DEV(dev, &pdev->dev);
1570 err = register_netdev(dev);
1571 if (err)
1572 goto err_mdiobus;
1573 return 0;
1574
1575err_mdiobus:
1576 mdiobus_unregister(pep->smi_bus);
1577err_free_mdio:
1578 mdiobus_free(pep->smi_bus);
1579err_base:
1580 iounmap(pep->base);
1581err_netdev:
1582 free_netdev(dev);
1583err_clk:
1584 clk_disable(clk);
1585 clk_put(clk);
1586 return err;
1587}
1588
1589static int pxa168_eth_remove(struct platform_device *pdev)
1590{
1591 struct net_device *dev = platform_get_drvdata(pdev);
1592 struct pxa168_eth_private *pep = netdev_priv(dev);
1593
1594 if (pep->htpr) {
1595 dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
1596 pep->htpr, pep->htpr_dma);
1597 pep->htpr = NULL;
1598 }
1599 if (pep->clk) {
1600 clk_disable(pep->clk);
1601 clk_put(pep->clk);
1602 pep->clk = NULL;
1603 }
1604 if (pep->phy != NULL)
1605 phy_detach(pep->phy);
1606
1607 iounmap(pep->base);
1608 pep->base = NULL;
1609 mdiobus_unregister(pep->smi_bus);
1610 mdiobus_free(pep->smi_bus);
1611 unregister_netdev(dev);
1612 flush_scheduled_work();
1613 free_netdev(dev);
1614 platform_set_drvdata(pdev, NULL);
1615 return 0;
1616}
1617
1618static void pxa168_eth_shutdown(struct platform_device *pdev)
1619{
1620 struct net_device *dev = platform_get_drvdata(pdev);
1621 eth_port_reset(dev);
1622}
1623
1624#ifdef CONFIG_PM
1625static int pxa168_eth_resume(struct platform_device *pdev)
1626{
1627 return -ENOSYS;
1628}
1629
1630static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
1631{
1632 return -ENOSYS;
1633}
1634
1635#else
1636#define pxa168_eth_resume NULL
1637#define pxa168_eth_suspend NULL
1638#endif
1639
1640static struct platform_driver pxa168_eth_driver = {
1641 .probe = pxa168_eth_probe,
1642 .remove = pxa168_eth_remove,
1643 .shutdown = pxa168_eth_shutdown,
1644 .resume = pxa168_eth_resume,
1645 .suspend = pxa168_eth_suspend,
1646 .driver = {
1647 .name = DRIVER_NAME,
1648 },
1649};
1650
1651static int __init pxa168_init_module(void)
1652{
1653 return platform_driver_register(&pxa168_eth_driver);
1654}
1655
1656static void __exit pxa168_cleanup_module(void)
1657{
1658 platform_driver_unregister(&pxa168_eth_driver);
1659}
1660
1661module_init(pxa168_init_module);
1662module_exit(pxa168_cleanup_module);
1663
1664MODULE_LICENSE("GPL");
1665MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
1666MODULE_ALIAS("platform:pxa168_eth");
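
/*
 * Usage sketch (assumption, not part of this patch): a board file is
 * expected to supply platform data whose fields match the members the
 * driver dereferences above (pd->phy_addr, pd->port_number, pd->speed,
 * pd->duplex, pd->rx_queue_size, pd->tx_queue_size, pd->init).  The
 * struct name and field values below are illustrative only.
 */
static struct pxa168_eth_platform_data example_eth_pdata = {
	.phy_addr	= 1,	/* MDIO address of the attached PHY */
	.port_number	= 0,	/* ports 0..2 are valid */
	.speed		= 0,	/* 0 selects autonegotiation */
	.duplex		= 0,
	.rx_queue_size	= 0,	/* 0 falls back to NUM_RX_DESCS */
	.tx_queue_size	= 0,	/* 0 falls back to NUM_TX_DESCS */
};
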
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 75ba744b173c..2c7cf0b64811 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -1316,7 +1316,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1316 return -ENOMEM; 1316 return -ENOMEM;
1317 } 1317 }
1318 1318
1319 skb_reserve(skb, 2); 1319 skb_reserve(skb, NET_IP_ALIGN);
1320 1320
1321 dma = pci_map_single(pdev, skb->data, 1321 dma = pci_map_single(pdev, skb->data,
1322 rds_ring->dma_size, PCI_DMA_FROMDEVICE); 1322 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
@@ -1404,7 +1404,6 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1404 if (pkt_offset) 1404 if (pkt_offset)
1405 skb_pull(skb, pkt_offset); 1405 skb_pull(skb, pkt_offset);
1406 1406
1407 skb->truesize = skb->len + sizeof(struct sk_buff);
1408 skb->protocol = eth_type_trans(skb, netdev); 1407 skb->protocol = eth_type_trans(skb, netdev);
1409 1408
1410 napi_gro_receive(&sds_ring->napi, skb); 1409 napi_gro_receive(&sds_ring->napi, skb);
@@ -1466,8 +1465,6 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1466 1465
1467 skb_put(skb, lro_length + data_offset); 1466 skb_put(skb, lro_length + data_offset);
1468 1467
1469 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1470
1471 skb_pull(skb, l2_hdr_offset); 1468 skb_pull(skb, l2_hdr_offset);
1472 skb->protocol = eth_type_trans(skb, netdev); 1469 skb->protocol = eth_type_trans(skb, netdev);
1473 1470
@@ -1700,8 +1697,6 @@ qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
1700 if (pkt_offset) 1697 if (pkt_offset)
1701 skb_pull(skb, pkt_offset); 1698 skb_pull(skb, pkt_offset);
1702 1699
1703 skb->truesize = skb->len + sizeof(struct sk_buff);
1704
1705 if (!qlcnic_check_loopback_buff(skb->data)) 1700 if (!qlcnic_check_loopback_buff(skb->data))
1706 adapter->diag_cnt++; 1701 adapter->diag_cnt++;
1707 1702
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index bf6d87adda4f..66eea5972020 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1983,8 +1983,6 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1983 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1983 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1984 struct net_device_stats *stats = &netdev->stats; 1984 struct net_device_stats *stats = &netdev->stats;
1985 1985
1986 memset(stats, 0, sizeof(*stats));
1987
1988 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; 1986 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1989 stats->tx_packets = adapter->stats.xmitfinished; 1987 stats->tx_packets = adapter->stats.xmitfinished;
1990 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; 1988 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
@@ -2190,9 +2188,16 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2190#ifdef CONFIG_NET_POLL_CONTROLLER 2188#ifdef CONFIG_NET_POLL_CONTROLLER
2191static void qlcnic_poll_controller(struct net_device *netdev) 2189static void qlcnic_poll_controller(struct net_device *netdev)
2192{ 2190{
2191 int ring;
2192 struct qlcnic_host_sds_ring *sds_ring;
2193 struct qlcnic_adapter *adapter = netdev_priv(netdev); 2193 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2194 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
2195
2194 disable_irq(adapter->irq); 2196 disable_irq(adapter->irq);
2195 qlcnic_intr(adapter->irq, adapter); 2197 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2198 sds_ring = &recv_ctx->sds_rings[ring];
2199 qlcnic_intr(adapter->irq, sds_ring);
2200 }
2196 enable_irq(adapter->irq); 2201 enable_irq(adapter->irq);
2197} 2202}
2198#endif 2203#endif
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 8d63f69b27d9..5f89e83501f4 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3919,12 +3919,12 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3919 for (i = 0; i < qdev->rss_ring_count; i++) 3919 for (i = 0; i < qdev->rss_ring_count; i++)
3920 netif_napi_del(&qdev->rx_ring[i].napi); 3920 netif_napi_del(&qdev->rx_ring[i].napi);
3921 3921
3922 ql_free_rx_buffers(qdev);
3923
3924 status = ql_adapter_reset(qdev); 3922 status = ql_adapter_reset(qdev);
3925 if (status) 3923 if (status)
3926 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", 3924 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3927 qdev->func); 3925 qdev->func);
3926 ql_free_rx_buffers(qdev);
3927
3928 return status; 3928 return status;
3929} 3929}
3930 3930
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 078bbf4e6f19..992db2fa136e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1212,7 +1212,8 @@ static void rtl8169_update_counters(struct net_device *dev)
1212 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) 1212 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1213 return; 1213 return;
1214 1214
1215 counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr); 1215 counters = dma_alloc_coherent(&tp->pci_dev->dev, sizeof(*counters),
1216 &paddr, GFP_KERNEL);
1216 if (!counters) 1217 if (!counters)
1217 return; 1218 return;
1218 1219
@@ -1233,7 +1234,8 @@ static void rtl8169_update_counters(struct net_device *dev)
1233 RTL_W32(CounterAddrLow, 0); 1234 RTL_W32(CounterAddrLow, 0);
1234 RTL_W32(CounterAddrHigh, 0); 1235 RTL_W32(CounterAddrHigh, 0);
1235 1236
1236 pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr); 1237 dma_free_coherent(&tp->pci_dev->dev, sizeof(*counters), counters,
1238 paddr);
1237} 1239}
1238 1240
1239static void rtl8169_get_ethtool_stats(struct net_device *dev, 1241static void rtl8169_get_ethtool_stats(struct net_device *dev,
@@ -2934,7 +2936,7 @@ static const struct rtl_cfg_info {
2934 .hw_start = rtl_hw_start_8168, 2936 .hw_start = rtl_hw_start_8168,
2935 .region = 2, 2937 .region = 2,
2936 .align = 8, 2938 .align = 8,
2937 .intr_event = SYSErr | LinkChg | RxOverflow | 2939 .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
2938 TxErr | TxOK | RxOK | RxErr, 2940 TxErr | TxOK | RxOK | RxErr,
2939 .napi_event = TxErr | TxOK | RxOK | RxOverflow, 2941 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
2940 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, 2942 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -3292,15 +3294,15 @@ static int rtl8169_open(struct net_device *dev)
3292 3294
3293 /* 3295 /*
3294 * Rx and Tx desscriptors needs 256 bytes alignment. 3296 * Rx and Tx desscriptors needs 256 bytes alignment.
3295 * pci_alloc_consistent provides more. 3297 * dma_alloc_coherent provides more.
3296 */ 3298 */
3297 tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES, 3299 tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
3298 &tp->TxPhyAddr); 3300 &tp->TxPhyAddr, GFP_KERNEL);
3299 if (!tp->TxDescArray) 3301 if (!tp->TxDescArray)
3300 goto err_pm_runtime_put; 3302 goto err_pm_runtime_put;
3301 3303
3302 tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES, 3304 tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
3303 &tp->RxPhyAddr); 3305 &tp->RxPhyAddr, GFP_KERNEL);
3304 if (!tp->RxDescArray) 3306 if (!tp->RxDescArray)
3305 goto err_free_tx_0; 3307 goto err_free_tx_0;
3306 3308
@@ -3334,12 +3336,12 @@ out:
3334err_release_ring_2: 3336err_release_ring_2:
3335 rtl8169_rx_clear(tp); 3337 rtl8169_rx_clear(tp);
3336err_free_rx_1: 3338err_free_rx_1:
3337 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, 3339 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
3338 tp->RxPhyAddr); 3340 tp->RxPhyAddr);
3339 tp->RxDescArray = NULL; 3341 tp->RxDescArray = NULL;
3340err_free_tx_0: 3342err_free_tx_0:
3341 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, 3343 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
3342 tp->TxPhyAddr); 3344 tp->TxPhyAddr);
3343 tp->TxDescArray = NULL; 3345 tp->TxDescArray = NULL;
3344err_pm_runtime_put: 3346err_pm_runtime_put:
3345 pm_runtime_put_noidle(&pdev->dev); 3347 pm_runtime_put_noidle(&pdev->dev);
@@ -3975,7 +3977,7 @@ static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
3975{ 3977{
3976 struct pci_dev *pdev = tp->pci_dev; 3978 struct pci_dev *pdev = tp->pci_dev;
3977 3979
3978 pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz, 3980 dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
3979 PCI_DMA_FROMDEVICE); 3981 PCI_DMA_FROMDEVICE);
3980 dev_kfree_skb(*sk_buff); 3982 dev_kfree_skb(*sk_buff);
3981 *sk_buff = NULL; 3983 *sk_buff = NULL;
@@ -4000,7 +4002,7 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
4000static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev, 4002static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
4001 struct net_device *dev, 4003 struct net_device *dev,
4002 struct RxDesc *desc, int rx_buf_sz, 4004 struct RxDesc *desc, int rx_buf_sz,
4003 unsigned int align) 4005 unsigned int align, gfp_t gfp)
4004{ 4006{
4005 struct sk_buff *skb; 4007 struct sk_buff *skb;
4006 dma_addr_t mapping; 4008 dma_addr_t mapping;
@@ -4008,13 +4010,13 @@ static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
4008 4010
4009 pad = align ? align : NET_IP_ALIGN; 4011 pad = align ? align : NET_IP_ALIGN;
4010 4012
4011 skb = netdev_alloc_skb(dev, rx_buf_sz + pad); 4013 skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
4012 if (!skb) 4014 if (!skb)
4013 goto err_out; 4015 goto err_out;
4014 4016
4015 skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad); 4017 skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad);
4016 4018
4017 mapping = pci_map_single(pdev, skb->data, rx_buf_sz, 4019 mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz,
4018 PCI_DMA_FROMDEVICE); 4020 PCI_DMA_FROMDEVICE);
4019 4021
4020 rtl8169_map_to_asic(desc, mapping, rx_buf_sz); 4022 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
@@ -4039,7 +4041,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
4039} 4041}
4040 4042
4041static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev, 4043static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
4042 u32 start, u32 end) 4044 u32 start, u32 end, gfp_t gfp)
4043{ 4045{
4044 u32 cur; 4046 u32 cur;
4045 4047
@@ -4054,7 +4056,7 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
4054 4056
4055 skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev, 4057 skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
4056 tp->RxDescArray + i, 4058 tp->RxDescArray + i,
4057 tp->rx_buf_sz, tp->align); 4059 tp->rx_buf_sz, tp->align, gfp);
4058 if (!skb) 4060 if (!skb)
4059 break; 4061 break;
4060 4062
@@ -4082,7 +4084,7 @@ static int rtl8169_init_ring(struct net_device *dev)
4082 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); 4084 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
4083 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); 4085 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
4084 4086
4085 if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC) 4087 if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
4086 goto err_out; 4088 goto err_out;
4087 4089
4088 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); 4090 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
@@ -4099,7 +4101,8 @@ static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
4099{ 4101{
4100 unsigned int len = tx_skb->len; 4102 unsigned int len = tx_skb->len;
4101 4103
4102 pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE); 4104 dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
4105 PCI_DMA_TODEVICE);
4103 desc->opts1 = 0x00; 4106 desc->opts1 = 0x00;
4104 desc->opts2 = 0x00; 4107 desc->opts2 = 0x00;
4105 desc->addr = 0x00; 4108 desc->addr = 0x00;
@@ -4243,7 +4246,8 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
4243 txd = tp->TxDescArray + entry; 4246 txd = tp->TxDescArray + entry;
4244 len = frag->size; 4247 len = frag->size;
4245 addr = ((void *) page_address(frag->page)) + frag->page_offset; 4248 addr = ((void *) page_address(frag->page)) + frag->page_offset;
4246 mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE); 4249 mapping = dma_map_single(&tp->pci_dev->dev, addr, len,
4250 PCI_DMA_TODEVICE);
4247 4251
4248 /* anti gcc 2.95.3 bugware (sic) */ 4252 /* anti gcc 2.95.3 bugware (sic) */
4249 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); 4253 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
@@ -4313,7 +4317,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
4313 tp->tx_skb[entry].skb = skb; 4317 tp->tx_skb[entry].skb = skb;
4314 } 4318 }
4315 4319
4316 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE); 4320 mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
4321 PCI_DMA_TODEVICE);
4317 4322
4318 tp->tx_skb[entry].len = len; 4323 tp->tx_skb[entry].len = len;
4319 txd->addr = cpu_to_le64(mapping); 4324 txd->addr = cpu_to_le64(mapping);
@@ -4477,8 +4482,8 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
4477 if (!skb) 4482 if (!skb)
4478 goto out; 4483 goto out;
4479 4484
4480 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size, 4485 dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size,
4481 PCI_DMA_FROMDEVICE); 4486 PCI_DMA_FROMDEVICE);
4482 skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size); 4487 skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
4483 *sk_buff = skb; 4488 *sk_buff = skb;
4484 done = true; 4489 done = true;
@@ -4549,11 +4554,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4549 rtl8169_rx_csum(skb, desc); 4554 rtl8169_rx_csum(skb, desc);
4550 4555
4551 if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) { 4556 if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
4552 pci_dma_sync_single_for_device(pdev, addr, 4557 dma_sync_single_for_device(&pdev->dev, addr,
4553 pkt_size, PCI_DMA_FROMDEVICE); 4558 pkt_size, PCI_DMA_FROMDEVICE);
4554 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 4559 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
4555 } else { 4560 } else {
4556 pci_unmap_single(pdev, addr, tp->rx_buf_sz, 4561 dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz,
4557 PCI_DMA_FROMDEVICE); 4562 PCI_DMA_FROMDEVICE);
4558 tp->Rx_skbuff[entry] = NULL; 4563 tp->Rx_skbuff[entry] = NULL;
4559 } 4564 }
@@ -4583,7 +4588,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4583 count = cur_rx - tp->cur_rx; 4588 count = cur_rx - tp->cur_rx;
4584 tp->cur_rx = cur_rx; 4589 tp->cur_rx = cur_rx;
4585 4590
4586 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); 4591 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
4587 if (!delta && count) 4592 if (!delta && count)
4588 netif_info(tp, intr, dev, "no Rx buffer allocated\n"); 4593 netif_info(tp, intr, dev, "no Rx buffer allocated\n");
4589 tp->dirty_rx += delta; 4594 tp->dirty_rx += delta;
@@ -4625,8 +4630,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4625 } 4630 }
4626 4631
4627 /* Work around for rx fifo overflow */ 4632 /* Work around for rx fifo overflow */
4628 if (unlikely(status & RxFIFOOver) && 4633 if (unlikely(status & RxFIFOOver)) {
4629 (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
4630 netif_stop_queue(dev); 4634 netif_stop_queue(dev);
4631 rtl8169_tx_timeout(dev); 4635 rtl8169_tx_timeout(dev);
4632 break; 4636 break;
@@ -4770,10 +4774,10 @@ static int rtl8169_close(struct net_device *dev)
4770 4774
4771 free_irq(dev->irq, dev); 4775 free_irq(dev->irq, dev);
4772 4776
4773 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, 4777 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
4774 tp->RxPhyAddr); 4778 tp->RxPhyAddr);
4775 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, 4779 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
4776 tp->TxPhyAddr); 4780 tp->TxPhyAddr);
4777 tp->TxDescArray = NULL; 4781 tp->TxDescArray = NULL;
4778 tp->RxDescArray = NULL; 4782 tp->RxDescArray = NULL;
4779 4783
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 07eb884ff982..44150f2f7bfd 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -384,7 +384,7 @@ static void rionet_remove(struct rio_dev *rdev)
384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ? 384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
385 __ilog2(sizeof(void *)) + 4 : 0); 385 __ilog2(sizeof(void *)) + 4 : 0);
386 unregister_netdev(ndev); 386 unregister_netdev(ndev);
387 kfree(ndev); 387 free_netdev(ndev);
388 388
389 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { 389 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
390 list_del(&peer->node); 390 list_del(&peer->node);
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index cc4bd8c65f8b..9265315baa0b 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -804,7 +804,7 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev)
804err_out_free_page: 804err_out_free_page:
805 free_page((unsigned long) sp->srings); 805 free_page((unsigned long) sp->srings);
806err_out_free_dev: 806err_out_free_dev:
807 kfree(dev); 807 free_netdev(dev);
808 808
809err_out: 809err_out:
810 return err; 810 return err;
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index f5a9eb1df593..79fd02bc69fd 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1437,7 +1437,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
1437 1437
1438static int sh_eth_drv_probe(struct platform_device *pdev) 1438static int sh_eth_drv_probe(struct platform_device *pdev)
1439{ 1439{
1440 int ret, i, devno = 0; 1440 int ret, devno = 0;
1441 struct resource *res; 1441 struct resource *res;
1442 struct net_device *ndev = NULL; 1442 struct net_device *ndev = NULL;
1443 struct sh_eth_private *mdp; 1443 struct sh_eth_private *mdp;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 40e5c46e7571..465ae7e84507 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -43,6 +43,7 @@
43#include <linux/seq_file.h> 43#include <linux/seq_file.h>
44#include <linux/mii.h> 44#include <linux/mii.h>
45#include <linux/slab.h> 45#include <linux/slab.h>
46#include <linux/dmi.h>
46#include <asm/irq.h> 47#include <asm/irq.h>
47 48
48#include "skge.h" 49#include "skge.h"
@@ -3868,6 +3869,8 @@ static void __devinit skge_show_addr(struct net_device *dev)
3868 netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); 3869 netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
3869} 3870}
3870 3871
3872static int only_32bit_dma;
3873
3871static int __devinit skge_probe(struct pci_dev *pdev, 3874static int __devinit skge_probe(struct pci_dev *pdev,
3872 const struct pci_device_id *ent) 3875 const struct pci_device_id *ent)
3873{ 3876{
@@ -3889,7 +3892,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3889 3892
3890 pci_set_master(pdev); 3893 pci_set_master(pdev);
3891 3894
3892 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3895 if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3893 using_dac = 1; 3896 using_dac = 1;
3894 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3897 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3895 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3898 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
@@ -4147,8 +4150,21 @@ static struct pci_driver skge_driver = {
4147 .shutdown = skge_shutdown, 4150 .shutdown = skge_shutdown,
4148}; 4151};
4149 4152
4153static struct dmi_system_id skge_32bit_dma_boards[] = {
4154 {
4155 .ident = "Gigabyte nForce boards",
4156 .matches = {
4157 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
4158 DMI_MATCH(DMI_BOARD_NAME, "nForce"),
4159 },
4160 },
4161 {}
4162};
4163
4150static int __init skge_init_module(void) 4164static int __init skge_init_module(void)
4151{ 4165{
4166 if (dmi_check_system(skge_32bit_dma_boards))
4167 only_32bit_dma = 1;
4152 skge_debug_init(); 4168 skge_debug_init();
4153 return pci_register_driver(&skge_driver); 4169 return pci_register_driver(&skge_driver);
4154} 4170}
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 0909ae934ad0..8150ba154116 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -58,6 +58,7 @@
58 58
59MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
60MODULE_VERSION(SMSC_DRV_VERSION); 60MODULE_VERSION(SMSC_DRV_VERSION);
61MODULE_ALIAS("platform:smsc911x");
61 62
62#if USE_DEBUG > 0 63#if USE_DEBUG > 0
63static int debug = 16; 64static int debug = 16;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index bbb7951b9c4c..ea0461eb2dbe 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1865,15 +1865,15 @@ static int stmmac_resume(struct platform_device *pdev)
1865 if (!netif_running(dev)) 1865 if (!netif_running(dev))
1866 return 0; 1866 return 0;
1867 1867
1868 spin_lock(&priv->lock);
1869
1870 if (priv->shutdown) { 1868 if (priv->shutdown) {
1871 /* Re-open the interface and re-init the MAC/DMA 1869 /* Re-open the interface and re-init the MAC/DMA
1872 and the rings. */ 1870 and the rings (i.e. on hibernation stage) */
1873 stmmac_open(dev); 1871 stmmac_open(dev);
1874 goto out_resume; 1872 return 0;
1875 } 1873 }
1876 1874
1875 spin_lock(&priv->lock);
1876
1877 /* Power Down bit, into the PM register, is cleared 1877 /* Power Down bit, into the PM register, is cleared
1878 * automatically as soon as a magic packet or a Wake-up frame 1878 * automatically as soon as a magic packet or a Wake-up frame
1879 * is received. Anyway, it's better to manually clear 1879 * is received. Anyway, it's better to manually clear
@@ -1901,7 +1901,6 @@ static int stmmac_resume(struct platform_device *pdev)
1901 1901
1902 netif_start_queue(dev); 1902 netif_start_queue(dev);
1903 1903
1904out_resume:
1905 spin_unlock(&priv->lock); 1904 spin_unlock(&priv->lock);
1906 return 0; 1905 return 0;
1907} 1906}
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index bc3af78a869f..1ec4b9e0239a 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4666,7 +4666,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4666 desc_idx, *post_ptr); 4666 desc_idx, *post_ptr);
4667 drop_it_no_recycle: 4667 drop_it_no_recycle:
4668 /* Other statistics kept track of by card. */ 4668 /* Other statistics kept track of by card. */
4669 tp->net_stats.rx_dropped++; 4669 tp->rx_dropped++;
4670 goto next_pkt; 4670 goto next_pkt;
4671 } 4671 }
4672 4672
@@ -4726,7 +4726,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4726 if (len > (tp->dev->mtu + ETH_HLEN) && 4726 if (len > (tp->dev->mtu + ETH_HLEN) &&
4727 skb->protocol != htons(ETH_P_8021Q)) { 4727 skb->protocol != htons(ETH_P_8021Q)) {
4728 dev_kfree_skb(skb); 4728 dev_kfree_skb(skb);
4729 goto next_pkt; 4729 goto drop_it_no_recycle;
4730 } 4730 }
4731 4731
4732 if (desc->type_flags & RXD_FLAG_VLAN && 4732 if (desc->type_flags & RXD_FLAG_VLAN &&
@@ -9240,6 +9240,8 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9240 stats->rx_missed_errors = old_stats->rx_missed_errors + 9240 stats->rx_missed_errors = old_stats->rx_missed_errors +
9241 get_stat64(&hw_stats->rx_discards); 9241 get_stat64(&hw_stats->rx_discards);
9242 9242
9243 stats->rx_dropped = tp->rx_dropped;
9244
9243 return stats; 9245 return stats;
9244} 9246}
9245 9247
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4937bd190964..be7ff138a7f9 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2759,7 +2759,7 @@ struct tg3 {
2759 2759
2760 2760
2761 /* begin "everything else" cacheline(s) section */ 2761 /* begin "everything else" cacheline(s) section */
2762 struct rtnl_link_stats64 net_stats; 2762 unsigned long rx_dropped;
2763 struct rtnl_link_stats64 net_stats_prev; 2763 struct rtnl_link_stats64 net_stats_prev;
2764 struct tg3_ethtool_stats estats; 2764 struct tg3_ethtool_stats estats;
2765 struct tg3_ethtool_stats estats_prev; 2765 struct tg3_ethtool_stats estats_prev;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 5efa57757a2c..6888e3d41462 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -243,6 +243,7 @@ enum {
243 NWayState = (1 << 14) | (1 << 13) | (1 << 12), 243 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
244 NWayRestart = (1 << 12), 244 NWayRestart = (1 << 12),
245 NonselPortActive = (1 << 9), 245 NonselPortActive = (1 << 9),
246 SelPortActive = (1 << 8),
246 LinkFailStatus = (1 << 2), 247 LinkFailStatus = (1 << 2),
247 NetCxnErr = (1 << 1), 248 NetCxnErr = (1 << 1),
248}; 249};
@@ -363,7 +364,9 @@ static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
363 364
364/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/ 365/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
365static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, }; 366static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
366static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, }; 367static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
368/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
369static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
367static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; 370static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
368 371
369 372
@@ -1064,6 +1067,9 @@ static void de21041_media_timer (unsigned long data)
1064 unsigned int carrier; 1067 unsigned int carrier;
1065 unsigned long flags; 1068 unsigned long flags;
1066 1069
1070 /* clear port active bits */
1071 dw32(SIAStatus, NonselPortActive | SelPortActive);
1072
1067 carrier = (status & NetCxnErr) ? 0 : 1; 1073 carrier = (status & NetCxnErr) ? 0 : 1;
1068 1074
1069 if (carrier) { 1075 if (carrier) {
@@ -1158,14 +1164,29 @@ no_link_yet:
1158static void de_media_interrupt (struct de_private *de, u32 status) 1164static void de_media_interrupt (struct de_private *de, u32 status)
1159{ 1165{
1160 if (status & LinkPass) { 1166 if (status & LinkPass) {
1167 /* Ignore if current media is AUI or BNC and we can't use TP */
1168 if ((de->media_type == DE_MEDIA_AUI ||
1169 de->media_type == DE_MEDIA_BNC) &&
1170 (de->media_lock ||
1171 !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
1172 return;
1173 /* If current media is not TP, change it to TP */
1174 if ((de->media_type == DE_MEDIA_AUI ||
1175 de->media_type == DE_MEDIA_BNC)) {
1176 de->media_type = DE_MEDIA_TP_AUTO;
1177 de_stop_rxtx(de);
1178 de_set_media(de);
1179 de_start_rxtx(de);
1180 }
1161 de_link_up(de); 1181 de_link_up(de);
1162 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); 1182 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1163 return; 1183 return;
1164 } 1184 }
1165 1185
1166 BUG_ON(!(status & LinkFail)); 1186 BUG_ON(!(status & LinkFail));
1167 1187 /* Mark the link as down only if current media is TP */
1168 if (netif_carrier_ok(de->dev)) { 1188 if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
1189 de->media_type != DE_MEDIA_BNC) {
1169 de_link_down(de); 1190 de_link_down(de);
1170 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); 1191 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1171 } 1192 }
@@ -1229,6 +1250,7 @@ static void de_adapter_sleep (struct de_private *de)
1229 if (de->de21040) 1250 if (de->de21040)
1230 return; 1251 return;
1231 1252
1253 dw32(CSR13, 0); /* Reset phy */
1232 pci_read_config_dword(de->pdev, PCIPM, &pmctl); 1254 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1233 pmctl |= PM_Sleep; 1255 pmctl |= PM_Sleep;
1234 pci_write_config_dword(de->pdev, PCIPM, pmctl); 1256 pci_write_config_dword(de->pdev, PCIPM, pmctl);
@@ -1574,12 +1596,15 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1574 return 0; /* nothing to change */ 1596 return 0; /* nothing to change */
1575 1597
1576 de_link_down(de); 1598 de_link_down(de);
1599 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1577 de_stop_rxtx(de); 1600 de_stop_rxtx(de);
1578 1601
1579 de->media_type = new_media; 1602 de->media_type = new_media;
1580 de->media_lock = media_lock; 1603 de->media_lock = media_lock;
1581 de->media_advertise = ecmd->advertising; 1604 de->media_advertise = ecmd->advertising;
1582 de_set_media(de); 1605 de_set_media(de);
1606 if (netif_running(de->dev))
1607 de_start_rxtx(de);
1583 1608
1584 return 0; 1609 return 0;
1585} 1610}
@@ -1911,8 +1936,14 @@ fill_defaults:
1911 for (i = 0; i < DE_MAX_MEDIA; i++) { 1936 for (i = 0; i < DE_MAX_MEDIA; i++) {
1912 if (de->media[i].csr13 == 0xffff) 1937 if (de->media[i].csr13 == 0xffff)
1913 de->media[i].csr13 = t21041_csr13[i]; 1938 de->media[i].csr13 = t21041_csr13[i];
1914 if (de->media[i].csr14 == 0xffff) 1939 if (de->media[i].csr14 == 0xffff) {
1915 de->media[i].csr14 = t21041_csr14[i]; 1940 /* autonegotiation is broken at least on some chip
1941 revisions - rev. 0x21 works, 0x11 does not */
1942 if (de->pdev->revision < 0x20)
1943 de->media[i].csr14 = t21041_csr14_brk[i];
1944 else
1945 de->media[i].csr14 = t21041_csr14[i];
1946 }
1916 if (de->media[i].csr15 == 0xffff) 1947 if (de->media[i].csr15 == 0xffff)
1917 de->media[i].csr15 = t21041_csr15[i]; 1948 de->media[i].csr15 = t21041_csr15[i];
1918 } 1949 }
@@ -2158,6 +2189,8 @@ static int de_resume (struct pci_dev *pdev)
2158 dev_err(&dev->dev, "pci_enable_device failed in resume\n"); 2189 dev_err(&dev->dev, "pci_enable_device failed in resume\n");
2159 goto out; 2190 goto out;
2160 } 2191 }
2192 pci_set_master(pdev);
2193 de_init_rings(de);
2161 de_init_hw(de); 2194 de_init_hw(de);
2162out_attach: 2195out_attach:
2163 netif_device_attach(dev); 2196 netif_device_attach(dev);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 6efca66b8766..1cd752f9a6e1 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1652,6 +1652,8 @@ static int hso_get_count(struct hso_serial *serial,
1652 struct uart_icount cnow; 1652 struct uart_icount cnow;
1653 struct hso_tiocmget *tiocmget = serial->tiocmget; 1653 struct hso_tiocmget *tiocmget = serial->tiocmget;
1654 1654
1655 memset(&icount, 0, sizeof(struct serial_icounter_struct));
1656
1655 if (!tiocmget) 1657 if (!tiocmget)
1656 return -ENOENT; 1658 return -ENOENT;
1657 spin_lock_irq(&serial->serial_lock); 1659 spin_lock_irq(&serial->serial_lock);
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 08e7b6abacdd..b2bcf99e6f08 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -58,6 +58,7 @@
58#define USB_PRODUCT_IPHONE 0x1290 58#define USB_PRODUCT_IPHONE 0x1290
59#define USB_PRODUCT_IPHONE_3G 0x1292 59#define USB_PRODUCT_IPHONE_3G 0x1292
60#define USB_PRODUCT_IPHONE_3GS 0x1294 60#define USB_PRODUCT_IPHONE_3GS 0x1294
61#define USB_PRODUCT_IPHONE_4 0x1297
61 62
62#define IPHETH_USBINTF_CLASS 255 63#define IPHETH_USBINTF_CLASS 255
63#define IPHETH_USBINTF_SUBCLASS 253 64#define IPHETH_USBINTF_SUBCLASS 253
@@ -92,6 +93,10 @@ static struct usb_device_id ipheth_table[] = {
92 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS, 93 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
93 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 94 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
94 IPHETH_USBINTF_PROTO) }, 95 IPHETH_USBINTF_PROTO) },
96 { USB_DEVICE_AND_INTERFACE_INFO(
97 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
98 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
99 IPHETH_USBINTF_PROTO) },
95 { } 100 { }
96}; 101};
97MODULE_DEVICE_TABLE(usb, ipheth_table); 102MODULE_DEVICE_TABLE(usb, ipheth_table);
@@ -424,10 +429,6 @@ static const struct net_device_ops ipheth_netdev_ops = {
424 .ndo_get_stats = &ipheth_stats, 429 .ndo_get_stats = &ipheth_stats,
425}; 430};
426 431
427static struct device_type ipheth_type = {
428 .name = "wwan",
429};
430
431static int ipheth_probe(struct usb_interface *intf, 432static int ipheth_probe(struct usb_interface *intf,
432 const struct usb_device_id *id) 433 const struct usb_device_id *id)
433{ 434{
@@ -445,7 +446,7 @@ static int ipheth_probe(struct usb_interface *intf,
445 446
446 netdev->netdev_ops = &ipheth_netdev_ops; 447 netdev->netdev_ops = &ipheth_netdev_ops;
447 netdev->watchdog_timeo = IPHETH_TX_TIMEOUT; 448 netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
448 strcpy(netdev->name, "wwan%d"); 449 strcpy(netdev->name, "eth%d");
449 450
450 dev = netdev_priv(netdev); 451 dev = netdev_priv(netdev);
451 dev->udev = udev; 452 dev->udev = udev;
@@ -495,7 +496,6 @@ static int ipheth_probe(struct usb_interface *intf,
495 496
496 SET_NETDEV_DEV(netdev, &intf->dev); 497 SET_NETDEV_DEV(netdev, &intf->dev);
497 SET_ETHTOOL_OPS(netdev, &ops); 498 SET_ETHTOOL_OPS(netdev, &ops);
498 SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
499 499
500 retval = register_netdev(netdev); 500 retval = register_netdev(netdev);
501 if (retval) { 501 if (retval) {
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index fd69095ef6e3..f53412368ce1 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2824,7 +2824,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2824 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); 2824 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2825 2825
2826 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | 2826 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2827 NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG; 2827 NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
2828 2828
2829 ret = register_netdev(dev); 2829 ret = register_netdev(dev);
2830 if (ret < 0) 2830 if (ret < 0)
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 8cc9e319f435..1737d1488b35 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -1244,16 +1244,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
1244 int i, result; 1244 int i, result;
1245 struct device *dev = i2400m_dev(i2400m); 1245 struct device *dev = i2400m_dev(i2400m);
1246 const struct i2400m_msg_hdr *msg_hdr; 1246 const struct i2400m_msg_hdr *msg_hdr;
1247 size_t pl_itr, pl_size, skb_len; 1247 size_t pl_itr, pl_size;
1248 unsigned long flags; 1248 unsigned long flags;
1249 unsigned num_pls, single_last; 1249 unsigned num_pls, single_last, skb_len;
1250 1250
1251 skb_len = skb->len; 1251 skb_len = skb->len;
1252 d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n", 1252 d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
1253 i2400m, skb, skb_len); 1253 i2400m, skb, skb_len);
1254 result = -EIO; 1254 result = -EIO;
1255 msg_hdr = (void *) skb->data; 1255 msg_hdr = (void *) skb->data;
1256 result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len); 1256 result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
1257 if (result < 0) 1257 if (result < 0)
1258 goto error_msg_hdr_check; 1258 goto error_msg_hdr_check;
1259 result = -EIO; 1259 result = -EIO;
@@ -1261,10 +1261,10 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
1261 pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */ 1261 pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */
1262 num_pls * sizeof(msg_hdr->pld[0]); 1262 num_pls * sizeof(msg_hdr->pld[0]);
1263 pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN); 1263 pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
1264 if (pl_itr > skb->len) { /* got all the payload descriptors? */ 1264 if (pl_itr > skb_len) { /* got all the payload descriptors? */
1265 dev_err(dev, "RX: HW BUG? message too short (%u bytes) for " 1265 dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
1266 "%u payload descriptors (%zu each, total %zu)\n", 1266 "%u payload descriptors (%zu each, total %zu)\n",
1267 skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr); 1267 skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
1268 goto error_pl_descr_short; 1268 goto error_pl_descr_short;
1269 } 1269 }
1270 /* Walk each payload payload--check we really got it */ 1270 /* Walk each payload payload--check we really got it */
@@ -1272,7 +1272,7 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
1272 /* work around old gcc warnings */ 1272 /* work around old gcc warnings */
1273 pl_size = i2400m_pld_size(&msg_hdr->pld[i]); 1273 pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
1274 result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i], 1274 result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
1275 pl_itr, skb->len); 1275 pl_itr, skb_len);
1276 if (result < 0) 1276 if (result < 0)
1277 goto error_pl_descr_check; 1277 goto error_pl_descr_check;
1278 single_last = num_pls == 1 || i == num_pls - 1; 1278 single_last = num_pls == 1 || i == num_pls - 1;
@@ -1290,16 +1290,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
1290 if (i < i2400m->rx_pl_min) 1290 if (i < i2400m->rx_pl_min)
1291 i2400m->rx_pl_min = i; 1291 i2400m->rx_pl_min = i;
1292 i2400m->rx_num++; 1292 i2400m->rx_num++;
1293 i2400m->rx_size_acc += skb->len; 1293 i2400m->rx_size_acc += skb_len;
1294 if (skb->len < i2400m->rx_size_min) 1294 if (skb_len < i2400m->rx_size_min)
1295 i2400m->rx_size_min = skb->len; 1295 i2400m->rx_size_min = skb_len;
1296 if (skb->len > i2400m->rx_size_max) 1296 if (skb_len > i2400m->rx_size_max)
1297 i2400m->rx_size_max = skb->len; 1297 i2400m->rx_size_max = skb_len;
1298 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 1298 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1299error_pl_descr_check: 1299error_pl_descr_check:
1300error_pl_descr_short: 1300error_pl_descr_short:
1301error_msg_hdr_check: 1301error_msg_hdr_check:
1302 d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n", 1302 d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
1303 i2400m, skb, skb_len, result); 1303 i2400m, skb, skb_len, result);
1304 return result; 1304 return result;
1305} 1305}
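The rx.c hunk above caches skb->len in a local variable before the payloads are handed off (after which the skb may no longer belong to this function) and makes that local an unsigned int so the %u format specifiers match its type. A minimal stand-alone sketch of the same pattern, with invented names:

#include <stdio.h>

struct buf { unsigned int len; };

/* Stand-in for handing the buffer to another layer: after this call the
 * caller must not touch 'b' again. */
static void consume(struct buf *b) { (void)b; }

static void process(struct buf *b)
{
        unsigned int len = b->len;      /* cache before ownership moves on */

        consume(b);                     /* 'b' may be freed or recycled here */

        printf("processed %u bytes\n", len);    /* %u matches unsigned int */
}

int main(void)
{
        struct buf b = { .len = 1500 };
        process(&b);
        return 0;
}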
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index a105087af963..f9aa1bc0a947 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -732,7 +732,7 @@ static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan)
732 732
733 /* Nothing to do for ADMtek BBP */ 733 /* Nothing to do for ADMtek BBP */
734 } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK) 734 } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK)
735 wiphy_debug(dev->wiphy, "unsupported bbp type %d\n", 735 wiphy_debug(dev->wiphy, "unsupported BBP type %d\n",
736 priv->bbp_type); 736 priv->bbp_type);
737 737
738 ADM8211_RESTORE(); 738 ADM8211_RESTORE();
@@ -1032,7 +1032,7 @@ static int adm8211_hw_init_bbp(struct ieee80211_hw *dev)
1032 break; 1032 break;
1033 } 1033 }
1034 } else 1034 } else
1035 wiphy_debug(dev->wiphy, "unsupported bbp %d\n", priv->bbp_type); 1035 wiphy_debug(dev->wiphy, "unsupported BBP %d\n", priv->bbp_type);
1036 1036
1037 ADM8211_CSR_WRITE(SYNRF, 0); 1037 ADM8211_CSR_WRITE(SYNRF, 0);
1038 1038
@@ -1525,7 +1525,7 @@ static int adm8211_start(struct ieee80211_hw *dev)
1525 retval = request_irq(priv->pdev->irq, adm8211_interrupt, 1525 retval = request_irq(priv->pdev->irq, adm8211_interrupt,
1526 IRQF_SHARED, "adm8211", dev); 1526 IRQF_SHARED, "adm8211", dev);
1527 if (retval) { 1527 if (retval) {
1528 wiphy_err(dev->wiphy, "failed to register irq handler\n"); 1528 wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
1529 goto fail; 1529 goto fail;
1530 } 1530 }
1531 1531
@@ -1902,7 +1902,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
1902 goto err_free_eeprom; 1902 goto err_free_eeprom;
1903 } 1903 }
1904 1904
1905 wiphy_info(dev->wiphy, "hwaddr %pm, rev 0x%02x\n", 1905 wiphy_info(dev->wiphy, "hwaddr %pM, Rev 0x%02x\n",
1906 dev->wiphy->perm_addr, pdev->revision); 1906 dev->wiphy->perm_addr, pdev->revision);
1907 1907
1908 return 0; 1908 return 0;
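The last adm8211 hunk also corrects the format specifier: the kernel's pointer extension for MAC addresses is the capital form %pM, which prints a 6-byte address as colon-separated octets, while the lowercase variant used before does not produce that colon-separated form, so the probe message did not read as intended. A hedged one-liner showing the intended usage (the helper and its arguments are made up for illustration):

#include <linux/device.h>
#include <linux/types.h>

/* Illustrative helper, not part of the driver. */
static void example_report_mac(struct device *dev, const u8 *addr)
{
        dev_info(dev, "hwaddr %pM\n", addr);    /* e.g. "hwaddr 00:11:22:33:44:55" */
}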
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index d5140a87f073..1128fa8c9ed5 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -655,7 +655,7 @@ static int at76_get_hw_config(struct at76_priv *priv)
655exit: 655exit:
656 kfree(hwcfg); 656 kfree(hwcfg);
657 if (ret < 0) 657 if (ret < 0)
658 wiphy_err(priv->hw->wiphy, "cannot get hw config (error %d)\n", 658 wiphy_err(priv->hw->wiphy, "cannot get HW Config (error %d)\n",
659 ret); 659 ret);
660 660
661 return ret; 661 return ret;
@@ -960,7 +960,7 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv)
960 sizeof(struct mib_mac_addr)); 960 sizeof(struct mib_mac_addr));
961 if (ret < 0) { 961 if (ret < 0) {
962 wiphy_err(priv->hw->wiphy, 962 wiphy_err(priv->hw->wiphy,
963 "at76_get_mib (mac_addr) failed: %d\n", ret); 963 "at76_get_mib (MAC_ADDR) failed: %d\n", ret);
964 goto exit; 964 goto exit;
965 } 965 }
966 966
@@ -989,7 +989,7 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
989 sizeof(struct mib_mac_wep)); 989 sizeof(struct mib_mac_wep));
990 if (ret < 0) { 990 if (ret < 0) {
991 wiphy_err(priv->hw->wiphy, 991 wiphy_err(priv->hw->wiphy,
992 "at76_get_mib (mac_wep) failed: %d\n", ret); 992 "at76_get_mib (MAC_WEP) failed: %d\n", ret);
993 goto exit; 993 goto exit;
994 } 994 }
995 995
@@ -1026,7 +1026,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
1026 sizeof(struct mib_mac_mgmt)); 1026 sizeof(struct mib_mac_mgmt));
1027 if (ret < 0) { 1027 if (ret < 0) {
1028 wiphy_err(priv->hw->wiphy, 1028 wiphy_err(priv->hw->wiphy,
1029 "at76_get_mib (mac_mgmt) failed: %d\n", ret); 1029 "at76_get_mib (MAC_MGMT) failed: %d\n", ret);
1030 goto exit; 1030 goto exit;
1031 } 1031 }
1032 1032
@@ -1062,7 +1062,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
1062 ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac)); 1062 ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac));
1063 if (ret < 0) { 1063 if (ret < 0) {
1064 wiphy_err(priv->hw->wiphy, 1064 wiphy_err(priv->hw->wiphy,
1065 "at76_get_mib (mac) failed: %d\n", ret); 1065 "at76_get_mib (MAC) failed: %d\n", ret);
1066 goto exit; 1066 goto exit;
1067 } 1067 }
1068 1068
@@ -1099,7 +1099,7 @@ static void at76_dump_mib_phy(struct at76_priv *priv)
1099 ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy)); 1099 ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy));
1100 if (ret < 0) { 1100 if (ret < 0) {
1101 wiphy_err(priv->hw->wiphy, 1101 wiphy_err(priv->hw->wiphy,
1102 "at76_get_mib (phy) failed: %d\n", ret); 1102 "at76_get_mib (PHY) failed: %d\n", ret);
1103 goto exit; 1103 goto exit;
1104 } 1104 }
1105 1105
@@ -1132,7 +1132,7 @@ static void at76_dump_mib_local(struct at76_priv *priv)
1132 ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local)); 1132 ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local));
1133 if (ret < 0) { 1133 if (ret < 0) {
1134 wiphy_err(priv->hw->wiphy, 1134 wiphy_err(priv->hw->wiphy,
1135 "at76_get_mib (local) failed: %d\n", ret); 1135 "at76_get_mib (LOCAL) failed: %d\n", ret);
1136 goto exit; 1136 goto exit;
1137 } 1137 }
1138 1138
@@ -1158,7 +1158,7 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv)
1158 sizeof(struct mib_mdomain)); 1158 sizeof(struct mib_mdomain));
1159 if (ret < 0) { 1159 if (ret < 0) {
1160 wiphy_err(priv->hw->wiphy, 1160 wiphy_err(priv->hw->wiphy,
1161 "at76_get_mib (mdomain) failed: %d\n", ret); 1161 "at76_get_mib (MDOMAIN) failed: %d\n", ret);
1162 goto exit; 1162 goto exit;
1163 } 1163 }
1164 1164
@@ -1229,7 +1229,7 @@ static int at76_submit_rx_urb(struct at76_priv *priv)
1229 struct sk_buff *skb = priv->rx_skb; 1229 struct sk_buff *skb = priv->rx_skb;
1230 1230
1231 if (!priv->rx_urb) { 1231 if (!priv->rx_urb) {
1232 wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is null\n", 1232 wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is NULL\n",
1233 __func__); 1233 __func__);
1234 return -EFAULT; 1234 return -EFAULT;
1235 } 1235 }
@@ -1792,7 +1792,7 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1792 wiphy_err(priv->hw->wiphy, "error in tx submit urb: %d\n", ret); 1792 wiphy_err(priv->hw->wiphy, "error in tx submit urb: %d\n", ret);
1793 if (ret == -EINVAL) 1793 if (ret == -EINVAL)
1794 wiphy_err(priv->hw->wiphy, 1794 wiphy_err(priv->hw->wiphy,
1795 "-einval: tx urb %p hcpriv %p complete %p\n", 1795 "-EINVAL: tx urb %p hcpriv %p complete %p\n",
1796 priv->tx_urb, 1796 priv->tx_urb,
1797 priv->tx_urb->hcpriv, priv->tx_urb->complete); 1797 priv->tx_urb->hcpriv, priv->tx_urb->complete);
1798 } 1798 }
@@ -2310,7 +2310,7 @@ static int at76_init_new_device(struct at76_priv *priv,
2310 2310
2311 priv->mac80211_registered = 1; 2311 priv->mac80211_registered = 1;
2312 2312
2313 wiphy_info(priv->hw->wiphy, "usb %s, mac %pm, firmware %d.%d.%d-%d\n", 2313 wiphy_info(priv->hw->wiphy, "USB %s, MAC %pM, firmware %d.%d.%d-%d\n",
2314 dev_name(&interface->dev), priv->mac_addr, 2314 dev_name(&interface->dev), priv->mac_addr,
2315 priv->fw_version.major, priv->fw_version.minor, 2315 priv->fw_version.major, priv->fw_version.minor,
2316 priv->fw_version.patch, priv->fw_version.build); 2316 priv->fw_version.patch, priv->fw_version.build);
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index c67b05f3bcbd..debfb0fbc7c5 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -245,7 +245,7 @@ static void __ar9170_dump_txstats(struct ar9170 *ar)
245{ 245{
246 int i; 246 int i;
247 247
248 wiphy_debug(ar->hw->wiphy, "qos queue stats\n"); 248 wiphy_debug(ar->hw->wiphy, "QoS queue stats\n");
249 249
250 for (i = 0; i < __AR9170_NUM_TXQ; i++) 250 for (i = 0; i < __AR9170_NUM_TXQ; i++)
251 wiphy_debug(ar->hw->wiphy, 251 wiphy_debug(ar->hw->wiphy,
@@ -387,7 +387,7 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
387 if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) { 387 if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
388#ifdef AR9170_QUEUE_DEBUG 388#ifdef AR9170_QUEUE_DEBUG
389 wiphy_debug(ar->hw->wiphy, 389 wiphy_debug(ar->hw->wiphy,
390 "skip frame => da %pm != %pm\n", 390 "skip frame => DA %pM != %pM\n",
391 mac, ieee80211_get_DA(hdr)); 391 mac, ieee80211_get_DA(hdr));
392 ar9170_print_txheader(ar, skb); 392 ar9170_print_txheader(ar, skb);
393#endif /* AR9170_QUEUE_DEBUG */ 393#endif /* AR9170_QUEUE_DEBUG */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 373dcfec689c..d77ce9906b6c 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1327,6 +1327,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1327 PCI_DMA_TODEVICE); 1327 PCI_DMA_TODEVICE);
1328 1328
1329 rate = ieee80211_get_tx_rate(sc->hw, info); 1329 rate = ieee80211_get_tx_rate(sc->hw, info);
1330 if (!rate) {
1331 ret = -EINVAL;
1332 goto err_unmap;
1333 }
1330 1334
1331 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 1335 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1332 flags |= AR5K_TXDESC_NOACK; 1336 flags |= AR5K_TXDESC_NOACK;
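The added ath5k lines bail out when the rate lookup returns NULL instead of letting a later dereference crash, so the mapping created just above can be undone by the existing err_unmap path. A small stand-alone illustration of that guard-plus-unwind shape (all names here are invented):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping { char *buf; };

static int map_buffer(struct mapping *m)
{
        m->buf = malloc(64);
        return m->buf ? 0 : -ENOMEM;
}

static void unmap_buffer(struct mapping *m)
{
        free(m->buf);
}

/* Returns NULL for an invalid key, mimicking a failed rate lookup. */
static const int *lookup_rate(int key)
{
        static const int rate = 54;
        return key >= 0 ? &rate : NULL;
}

static int setup(struct mapping *m, int key)
{
        const int *rate;
        int ret;

        ret = map_buffer(m);            /* resource acquired first */
        if (ret)
                return ret;

        rate = lookup_rate(key);
        if (!rate) {                    /* the new guard: fail cleanly ...   */
                ret = -EINVAL;
                goto err_unmap;         /* ... instead of dereferencing NULL */
        }

        printf("using rate %d\n", *rate);
        return 0;

err_unmap:
        unmap_buffer(m);                /* don't leak what was set up above */
        return ret;
}

int main(void)
{
        struct mapping m;
        return setup(&m, -1) ? 1 : 0;
}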
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index cc648b6ae31c..a3d95cca8f0c 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -543,7 +543,7 @@ static u8 ath9k_hw_chan_2_clockrate_mhz(struct ath_hw *ah)
543 if (conf_is_ht40(conf)) 543 if (conf_is_ht40(conf))
544 return clockrate * 2; 544 return clockrate * 2;
545 545
546 return clockrate * 2; 546 return clockrate;
547} 547}
548 548
549static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah) 549static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index b883b174385b..057fb69ddf7f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -797,7 +797,7 @@ static bool ar9300_uncompress_block(struct ath_hw *ah,
797 length = block[it+1]; 797 length = block[it+1];
798 length &= 0xff; 798 length &= 0xff;
799 799
800 if (length > 0 && spot >= 0 && spot+length < mdataSize) { 800 if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
801 ath_print(common, ATH_DBG_EEPROM, 801 ath_print(common, ATH_DBG_EEPROM,
802 "Restore at %d: spot=%d " 802 "Restore at %d: spot=%d "
803 "offset=%d length=%d\n", 803 "offset=%d length=%d\n",
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 7f48df1e2903..0b09db0f8e7d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -62,7 +62,7 @@
62 62
63#define SD_NO_CTL 0xE0 63#define SD_NO_CTL 0xE0
64#define NO_CTL 0xff 64#define NO_CTL 0xff
65#define CTL_MODE_M 7 65#define CTL_MODE_M 0xf
66#define CTL_11A 0 66#define CTL_11A 0
67#define CTL_11B 1 67#define CTL_11B 1
68#define CTL_11G 2 68#define CTL_11G 2
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index a1c39526161a..345dd9721b41 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -31,7 +31,6 @@ enum ctl_group {
31#define NO_CTL 0xff 31#define NO_CTL 0xff
32#define SD_NO_CTL 0xE0 32#define SD_NO_CTL 0xE0
33#define NO_CTL 0xff 33#define NO_CTL 0xff
34#define CTL_MODE_M 7
35#define CTL_11A 0 34#define CTL_11A 0
36#define CTL_11B 1 35#define CTL_11B 1
37#define CTL_11G 2 36#define CTL_11G 2
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 1189dbb6e2a6..996e9d7d7586 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2723,14 +2723,6 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv)
2723 2723
2724 packet = &priv->rx_buffers[i]; 2724 packet = &priv->rx_buffers[i];
2725 2725
2726 /* Sync the DMA for the STATUS buffer so CPU is sure to get
2727 * the correct values */
2728 pci_dma_sync_single_for_cpu(priv->pci_dev,
2729 sq->nic +
2730 sizeof(struct ipw2100_status) * i,
2731 sizeof(struct ipw2100_status),
2732 PCI_DMA_FROMDEVICE);
2733
2734 /* Sync the DMA for the RX buffer so CPU is sure to get 2726 /* Sync the DMA for the RX buffer so CPU is sure to get
2735 * the correct values */ 2727 * the correct values */
2736 pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr, 2728 pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr,
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index fec026212326..0b779a41a142 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -265,7 +265,7 @@ struct iwl_cfg iwl1000_bgn_cfg = {
265 .support_ct_kill_exit = true, 265 .support_ct_kill_exit = true,
266 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, 266 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
267 .chain_noise_scale = 1000, 267 .chain_noise_scale = 1000,
268 .monitor_recover_period = IWL_MONITORING_PERIOD, 268 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
269 .max_event_log_size = 128, 269 .max_event_log_size = 128,
270 .ucode_tracing = true, 270 .ucode_tracing = true,
271 .sensitivity_calib_by_driver = true, 271 .sensitivity_calib_by_driver = true,
@@ -297,7 +297,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
297 .support_ct_kill_exit = true, 297 .support_ct_kill_exit = true,
298 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, 298 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
299 .chain_noise_scale = 1000, 299 .chain_noise_scale = 1000,
300 .monitor_recover_period = IWL_MONITORING_PERIOD, 300 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
301 .max_event_log_size = 128, 301 .max_event_log_size = 128,
302 .ucode_tracing = true, 302 .ucode_tracing = true,
303 .sensitivity_calib_by_driver = true, 303 .sensitivity_calib_by_driver = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 6950a783913b..8ccfcd08218d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -2731,7 +2731,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2731 .led_compensation = 64, 2731 .led_compensation = 64,
2732 .broken_powersave = true, 2732 .broken_powersave = true,
2733 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 2733 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
2734 .monitor_recover_period = IWL_MONITORING_PERIOD, 2734 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
2735 .max_event_log_size = 512, 2735 .max_event_log_size = 512,
2736 .tx_power_by_driver = true, 2736 .tx_power_by_driver = true,
2737}; 2737};
@@ -2752,7 +2752,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2752 .led_compensation = 64, 2752 .led_compensation = 64,
2753 .broken_powersave = true, 2753 .broken_powersave = true,
2754 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 2754 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
2755 .monitor_recover_period = IWL_MONITORING_PERIOD, 2755 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
2756 .max_event_log_size = 512, 2756 .max_event_log_size = 512,
2757 .tx_power_by_driver = true, 2757 .tx_power_by_driver = true,
2758}; 2758};
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index d6da356608fa..d92b72909233 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2322,7 +2322,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
2322 .led_compensation = 61, 2322 .led_compensation = 61,
2323 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, 2323 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2324 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 2324 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2325 .monitor_recover_period = IWL_MONITORING_PERIOD, 2325 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
2326 .temperature_kelvin = true, 2326 .temperature_kelvin = true,
2327 .max_event_log_size = 512, 2327 .max_event_log_size = 512,
2328 .tx_power_by_driver = true, 2328 .tx_power_by_driver = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index aacf3770f075..48bdcd8d2e94 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -510,7 +510,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
510 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 510 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
511 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 511 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
512 .chain_noise_scale = 1000, 512 .chain_noise_scale = 1000,
513 .monitor_recover_period = IWL_MONITORING_PERIOD, 513 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
514 .max_event_log_size = 512, 514 .max_event_log_size = 512,
515 .ucode_tracing = true, 515 .ucode_tracing = true,
516 .sensitivity_calib_by_driver = true, 516 .sensitivity_calib_by_driver = true,
@@ -541,7 +541,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
541 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 541 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
542 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 542 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
543 .chain_noise_scale = 1000, 543 .chain_noise_scale = 1000,
544 .monitor_recover_period = IWL_MONITORING_PERIOD, 544 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
545 .max_event_log_size = 512, 545 .max_event_log_size = 512,
546 .ucode_tracing = true, 546 .ucode_tracing = true,
547 .sensitivity_calib_by_driver = true, 547 .sensitivity_calib_by_driver = true,
@@ -570,7 +570,7 @@ struct iwl_cfg iwl5100_abg_cfg = {
570 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 570 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
571 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 571 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
572 .chain_noise_scale = 1000, 572 .chain_noise_scale = 1000,
573 .monitor_recover_period = IWL_MONITORING_PERIOD, 573 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
574 .max_event_log_size = 512, 574 .max_event_log_size = 512,
575 .ucode_tracing = true, 575 .ucode_tracing = true,
576 .sensitivity_calib_by_driver = true, 576 .sensitivity_calib_by_driver = true,
@@ -601,7 +601,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
601 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 601 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
602 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 602 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
603 .chain_noise_scale = 1000, 603 .chain_noise_scale = 1000,
604 .monitor_recover_period = IWL_MONITORING_PERIOD, 604 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
605 .max_event_log_size = 512, 605 .max_event_log_size = 512,
606 .ucode_tracing = true, 606 .ucode_tracing = true,
607 .sensitivity_calib_by_driver = true, 607 .sensitivity_calib_by_driver = true,
@@ -632,7 +632,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
632 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 632 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
633 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 633 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
634 .chain_noise_scale = 1000, 634 .chain_noise_scale = 1000,
635 .monitor_recover_period = IWL_MONITORING_PERIOD, 635 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
636 .max_event_log_size = 512, 636 .max_event_log_size = 512,
637 .ucode_tracing = true, 637 .ucode_tracing = true,
638 .sensitivity_calib_by_driver = true, 638 .sensitivity_calib_by_driver = true,
@@ -663,7 +663,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
663 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 663 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
664 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 664 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
665 .chain_noise_scale = 1000, 665 .chain_noise_scale = 1000,
666 .monitor_recover_period = IWL_MONITORING_PERIOD, 666 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
667 .max_event_log_size = 512, 667 .max_event_log_size = 512,
668 .ucode_tracing = true, 668 .ucode_tracing = true,
669 .sensitivity_calib_by_driver = true, 669 .sensitivity_calib_by_driver = true,
@@ -693,7 +693,7 @@ struct iwl_cfg iwl5150_abg_cfg = {
693 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 693 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
694 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 694 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
695 .chain_noise_scale = 1000, 695 .chain_noise_scale = 1000,
696 .monitor_recover_period = IWL_MONITORING_PERIOD, 696 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
697 .max_event_log_size = 512, 697 .max_event_log_size = 512,
698 .ucode_tracing = true, 698 .ucode_tracing = true,
699 .sensitivity_calib_by_driver = true, 699 .sensitivity_calib_by_driver = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index af4fd50f3405..cee06b968de8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -388,7 +388,7 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
388 .support_ct_kill_exit = true, 388 .support_ct_kill_exit = true,
389 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 389 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
390 .chain_noise_scale = 1000, 390 .chain_noise_scale = 1000,
391 .monitor_recover_period = IWL_MONITORING_PERIOD, 391 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
392 .max_event_log_size = 512, 392 .max_event_log_size = 512,
393 .ucode_tracing = true, 393 .ucode_tracing = true,
394 .sensitivity_calib_by_driver = true, 394 .sensitivity_calib_by_driver = true,
@@ -424,7 +424,7 @@ struct iwl_cfg iwl6000g2a_2abg_cfg = {
424 .support_ct_kill_exit = true, 424 .support_ct_kill_exit = true,
425 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 425 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
426 .chain_noise_scale = 1000, 426 .chain_noise_scale = 1000,
427 .monitor_recover_period = IWL_MONITORING_PERIOD, 427 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
428 .max_event_log_size = 512, 428 .max_event_log_size = 512,
429 .sensitivity_calib_by_driver = true, 429 .sensitivity_calib_by_driver = true,
430 .chain_noise_calib_by_driver = true, 430 .chain_noise_calib_by_driver = true,
@@ -459,7 +459,7 @@ struct iwl_cfg iwl6000g2a_2bg_cfg = {
459 .support_ct_kill_exit = true, 459 .support_ct_kill_exit = true,
460 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 460 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
461 .chain_noise_scale = 1000, 461 .chain_noise_scale = 1000,
462 .monitor_recover_period = IWL_MONITORING_PERIOD, 462 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
463 .max_event_log_size = 512, 463 .max_event_log_size = 512,
464 .sensitivity_calib_by_driver = true, 464 .sensitivity_calib_by_driver = true,
465 .chain_noise_calib_by_driver = true, 465 .chain_noise_calib_by_driver = true,
@@ -496,7 +496,7 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
496 .support_ct_kill_exit = true, 496 .support_ct_kill_exit = true,
497 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 497 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
498 .chain_noise_scale = 1000, 498 .chain_noise_scale = 1000,
499 .monitor_recover_period = IWL_MONITORING_PERIOD, 499 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
500 .max_event_log_size = 512, 500 .max_event_log_size = 512,
501 .sensitivity_calib_by_driver = true, 501 .sensitivity_calib_by_driver = true,
502 .chain_noise_calib_by_driver = true, 502 .chain_noise_calib_by_driver = true,
@@ -532,7 +532,7 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
532 .support_ct_kill_exit = true, 532 .support_ct_kill_exit = true,
533 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 533 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
534 .chain_noise_scale = 1000, 534 .chain_noise_scale = 1000,
535 .monitor_recover_period = IWL_MONITORING_PERIOD, 535 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
536 .max_event_log_size = 512, 536 .max_event_log_size = 512,
537 .sensitivity_calib_by_driver = true, 537 .sensitivity_calib_by_driver = true,
538 .chain_noise_calib_by_driver = true, 538 .chain_noise_calib_by_driver = true,
@@ -570,7 +570,7 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
570 .support_ct_kill_exit = true, 570 .support_ct_kill_exit = true,
571 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 571 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
572 .chain_noise_scale = 1000, 572 .chain_noise_scale = 1000,
573 .monitor_recover_period = IWL_MONITORING_PERIOD, 573 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
574 .max_event_log_size = 512, 574 .max_event_log_size = 512,
575 .sensitivity_calib_by_driver = true, 575 .sensitivity_calib_by_driver = true,
576 .chain_noise_calib_by_driver = true, 576 .chain_noise_calib_by_driver = true,
@@ -606,7 +606,7 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
606 .support_ct_kill_exit = true, 606 .support_ct_kill_exit = true,
607 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 607 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
608 .chain_noise_scale = 1000, 608 .chain_noise_scale = 1000,
609 .monitor_recover_period = IWL_MONITORING_PERIOD, 609 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
610 .max_event_log_size = 512, 610 .max_event_log_size = 512,
611 .sensitivity_calib_by_driver = true, 611 .sensitivity_calib_by_driver = true,
612 .chain_noise_calib_by_driver = true, 612 .chain_noise_calib_by_driver = true,
@@ -644,7 +644,7 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
644 .support_ct_kill_exit = true, 644 .support_ct_kill_exit = true,
645 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 645 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
646 .chain_noise_scale = 1000, 646 .chain_noise_scale = 1000,
647 .monitor_recover_period = IWL_MONITORING_PERIOD, 647 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
648 .max_event_log_size = 512, 648 .max_event_log_size = 512,
649 .sensitivity_calib_by_driver = true, 649 .sensitivity_calib_by_driver = true,
650 .chain_noise_calib_by_driver = true, 650 .chain_noise_calib_by_driver = true,
@@ -680,7 +680,7 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
680 .support_ct_kill_exit = true, 680 .support_ct_kill_exit = true,
681 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 681 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
682 .chain_noise_scale = 1000, 682 .chain_noise_scale = 1000,
683 .monitor_recover_period = IWL_MONITORING_PERIOD, 683 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
684 .max_event_log_size = 512, 684 .max_event_log_size = 512,
685 .sensitivity_calib_by_driver = true, 685 .sensitivity_calib_by_driver = true,
686 .chain_noise_calib_by_driver = true, 686 .chain_noise_calib_by_driver = true,
@@ -721,7 +721,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
721 .support_ct_kill_exit = true, 721 .support_ct_kill_exit = true,
722 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 722 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
723 .chain_noise_scale = 1000, 723 .chain_noise_scale = 1000,
724 .monitor_recover_period = IWL_MONITORING_PERIOD, 724 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
725 .max_event_log_size = 1024, 725 .max_event_log_size = 1024,
726 .ucode_tracing = true, 726 .ucode_tracing = true,
727 .sensitivity_calib_by_driver = true, 727 .sensitivity_calib_by_driver = true,
@@ -756,7 +756,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
756 .support_ct_kill_exit = true, 756 .support_ct_kill_exit = true,
757 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 757 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
758 .chain_noise_scale = 1000, 758 .chain_noise_scale = 1000,
759 .monitor_recover_period = IWL_MONITORING_PERIOD, 759 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
760 .max_event_log_size = 1024, 760 .max_event_log_size = 1024,
761 .ucode_tracing = true, 761 .ucode_tracing = true,
762 .sensitivity_calib_by_driver = true, 762 .sensitivity_calib_by_driver = true,
@@ -791,7 +791,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
791 .support_ct_kill_exit = true, 791 .support_ct_kill_exit = true,
792 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 792 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
793 .chain_noise_scale = 1000, 793 .chain_noise_scale = 1000,
794 .monitor_recover_period = IWL_MONITORING_PERIOD, 794 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
795 .max_event_log_size = 1024, 795 .max_event_log_size = 1024,
796 .ucode_tracing = true, 796 .ucode_tracing = true,
797 .sensitivity_calib_by_driver = true, 797 .sensitivity_calib_by_driver = true,
@@ -828,7 +828,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
828 .support_ct_kill_exit = true, 828 .support_ct_kill_exit = true,
829 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 829 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
830 .chain_noise_scale = 1500, 830 .chain_noise_scale = 1500,
831 .monitor_recover_period = IWL_MONITORING_PERIOD, 831 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
832 .max_event_log_size = 1024, 832 .max_event_log_size = 1024,
833 .ucode_tracing = true, 833 .ucode_tracing = true,
834 .sensitivity_calib_by_driver = true, 834 .sensitivity_calib_by_driver = true,
@@ -866,7 +866,7 @@ struct iwl_cfg iwl6050g2_bgn_cfg = {
866 .support_ct_kill_exit = true, 866 .support_ct_kill_exit = true,
867 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 867 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
868 .chain_noise_scale = 1500, 868 .chain_noise_scale = 1500,
869 .monitor_recover_period = IWL_MONITORING_PERIOD, 869 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
870 .max_event_log_size = 1024, 870 .max_event_log_size = 1024,
871 .ucode_tracing = true, 871 .ucode_tracing = true,
872 .sensitivity_calib_by_driver = true, 872 .sensitivity_calib_by_driver = true,
@@ -902,7 +902,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
902 .support_ct_kill_exit = true, 902 .support_ct_kill_exit = true,
903 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 903 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
904 .chain_noise_scale = 1500, 904 .chain_noise_scale = 1500,
905 .monitor_recover_period = IWL_MONITORING_PERIOD, 905 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
906 .max_event_log_size = 1024, 906 .max_event_log_size = 1024,
907 .ucode_tracing = true, 907 .ucode_tracing = true,
908 .sensitivity_calib_by_driver = true, 908 .sensitivity_calib_by_driver = true,
@@ -940,7 +940,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
940 .support_ct_kill_exit = true, 940 .support_ct_kill_exit = true,
941 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 941 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
942 .chain_noise_scale = 1000, 942 .chain_noise_scale = 1000,
943 .monitor_recover_period = IWL_MONITORING_PERIOD, 943 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
944 .max_event_log_size = 1024, 944 .max_event_log_size = 1024,
945 .ucode_tracing = true, 945 .ucode_tracing = true,
946 .sensitivity_calib_by_driver = true, 946 .sensitivity_calib_by_driver = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 9dd9e64c2b0b..8fd00a6e5120 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -1411,7 +1411,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1411 clear_bit(STATUS_SCAN_HW, &priv->status); 1411 clear_bit(STATUS_SCAN_HW, &priv->status);
1412 clear_bit(STATUS_SCANNING, &priv->status); 1412 clear_bit(STATUS_SCANNING, &priv->status);
1413 /* inform mac80211 scan aborted */ 1413 /* inform mac80211 scan aborted */
1414 queue_work(priv->workqueue, &priv->scan_completed); 1414 queue_work(priv->workqueue, &priv->abort_scan);
1415} 1415}
1416 1416
1417int iwlagn_manage_ibss_station(struct iwl_priv *priv, 1417int iwlagn_manage_ibss_station(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index c1882fd8345d..10d7b9b7f064 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -3667,6 +3667,49 @@ out_exit:
3667 IWL_DEBUG_MAC80211(priv, "leave\n"); 3667 IWL_DEBUG_MAC80211(priv, "leave\n");
3668} 3668}
3669 3669
3670static void iwlagn_configure_filter(struct ieee80211_hw *hw,
3671 unsigned int changed_flags,
3672 unsigned int *total_flags,
3673 u64 multicast)
3674{
3675 struct iwl_priv *priv = hw->priv;
3676 __le32 filter_or = 0, filter_nand = 0;
3677
3678#define CHK(test, flag) do { \
3679 if (*total_flags & (test)) \
3680 filter_or |= (flag); \
3681 else \
3682 filter_nand |= (flag); \
3683 } while (0)
3684
3685 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
3686 changed_flags, *total_flags);
3687
3688 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
3689 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
3690 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
3691
3692#undef CHK
3693
3694 mutex_lock(&priv->mutex);
3695
3696 priv->staging_rxon.filter_flags &= ~filter_nand;
3697 priv->staging_rxon.filter_flags |= filter_or;
3698
3699 iwlcore_commit_rxon(priv);
3700
3701 mutex_unlock(&priv->mutex);
3702
3703 /*
3704 * Receiving all multicast frames is always enabled by the
3705 * default flags setup in iwl_connection_init_rx_config()
3706 * since we currently do not support programming multicast
3707 * filters into the device.
3708 */
3709 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
3710 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3711}
3712
3670static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop) 3713static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop)
3671{ 3714{
3672 struct iwl_priv *priv = hw->priv; 3715 struct iwl_priv *priv = hw->priv;
@@ -3867,7 +3910,7 @@ static struct ieee80211_ops iwl_hw_ops = {
3867 .add_interface = iwl_mac_add_interface, 3910 .add_interface = iwl_mac_add_interface,
3868 .remove_interface = iwl_mac_remove_interface, 3911 .remove_interface = iwl_mac_remove_interface,
3869 .config = iwl_mac_config, 3912 .config = iwl_mac_config,
3870 .configure_filter = iwl_configure_filter, 3913 .configure_filter = iwlagn_configure_filter,
3871 .set_key = iwl_mac_set_key, 3914 .set_key = iwl_mac_set_key,
3872 .update_tkip_key = iwl_mac_update_tkip_key, 3915 .update_tkip_key = iwl_mac_update_tkip_key,
3873 .conf_tx = iwl_mac_conf_tx, 3916 .conf_tx = iwl_mac_conf_tx,
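The CHK() macro in the new iwlagn_configure_filter() builds two masks in one pass over the requested flags: bits to assert (filter_or) and bits to drop (filter_nand), which are then applied to the staging RXON flags as clear-then-set. The update rule in isolation, with arbitrary example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t flags    = 0x0f;       /* current filter flags              */
        uint32_t set_or   = 0x30;       /* bits the caller asked to enable   */
        uint32_t clr_nand = 0x05;       /* bits the caller asked to disable  */

        flags &= ~clr_nand;             /* drop the explicitly disabled bits */
        flags |= set_or;                /* then assert the enabled ones      */

        printf("0x%02x\n", (unsigned int)flags);        /* prints 0x3a */
        return 0;
}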
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 2c03c6e20a72..e23c4060a0f0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1328,51 +1328,6 @@ out:
1328EXPORT_SYMBOL(iwl_apm_init); 1328EXPORT_SYMBOL(iwl_apm_init);
1329 1329
1330 1330
1331
1332void iwl_configure_filter(struct ieee80211_hw *hw,
1333 unsigned int changed_flags,
1334 unsigned int *total_flags,
1335 u64 multicast)
1336{
1337 struct iwl_priv *priv = hw->priv;
1338 __le32 filter_or = 0, filter_nand = 0;
1339
1340#define CHK(test, flag) do { \
1341 if (*total_flags & (test)) \
1342 filter_or |= (flag); \
1343 else \
1344 filter_nand |= (flag); \
1345 } while (0)
1346
1347 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
1348 changed_flags, *total_flags);
1349
1350 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
1351 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
1352 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
1353
1354#undef CHK
1355
1356 mutex_lock(&priv->mutex);
1357
1358 priv->staging_rxon.filter_flags &= ~filter_nand;
1359 priv->staging_rxon.filter_flags |= filter_or;
1360
1361 iwlcore_commit_rxon(priv);
1362
1363 mutex_unlock(&priv->mutex);
1364
1365 /*
1366 * Receiving all multicast frames is always enabled by the
1367 * default flags setup in iwl_connection_init_rx_config()
1368 * since we currently do not support programming multicast
1369 * filters into the device.
1370 */
1371 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
1372 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
1373}
1374EXPORT_SYMBOL(iwl_configure_filter);
1375
1376int iwl_set_hw_params(struct iwl_priv *priv) 1331int iwl_set_hw_params(struct iwl_priv *priv)
1377{ 1332{
1378 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; 1333 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
@@ -2658,6 +2613,11 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
2658 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2613 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2659 return -EINVAL; 2614 return -EINVAL;
2660 2615
2616 if (test_bit(STATUS_SCANNING, &priv->status)) {
2617 IWL_DEBUG_INFO(priv, "scan in progress.\n");
2618 return -EINVAL;
2619 }
2620
2661 if (mode >= IWL_MAX_FORCE_RESET) { 2621 if (mode >= IWL_MAX_FORCE_RESET) {
2662 IWL_DEBUG_INFO(priv, "invalid reset request.\n"); 2622 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
2663 return -EINVAL; 2623 return -EINVAL;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 4a71dfb10a15..5e6ee3da6bbf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -372,9 +372,6 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
372 u32 decrypt_res, 372 u32 decrypt_res,
373 struct ieee80211_rx_status *stats); 373 struct ieee80211_rx_status *stats);
374void iwl_irq_handle_error(struct iwl_priv *priv); 374void iwl_irq_handle_error(struct iwl_priv *priv);
375void iwl_configure_filter(struct ieee80211_hw *hw,
376 unsigned int changed_flags,
377 unsigned int *total_flags, u64 multicast);
378int iwl_set_hw_params(struct iwl_priv *priv); 375int iwl_set_hw_params(struct iwl_priv *priv);
379void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif); 376void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
380void iwl_bss_info_changed(struct ieee80211_hw *hw, 377void iwl_bss_info_changed(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index f35bcad56e36..2e97cd2fa98a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1049,7 +1049,8 @@ struct iwl_event_log {
1049#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) 1049#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
1050 1050
1051/* timer constants use to monitor and recover stuck tx queues in mSecs */ 1051/* timer constants use to monitor and recover stuck tx queues in mSecs */
1052#define IWL_MONITORING_PERIOD (1000) 1052#define IWL_DEF_MONITORING_PERIOD (1000)
1053#define IWL_LONG_MONITORING_PERIOD (5000)
1053#define IWL_ONE_HUNDRED_MSECS (100) 1054#define IWL_ONE_HUNDRED_MSECS (100)
1054#define IWL_SIXTY_SECS (60000) 1055#define IWL_SIXTY_SECS (60000)
1055 1056
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 70c4b8fba0ee..d31661c1ce77 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -3018,7 +3018,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
3018 clear_bit(STATUS_SCANNING, &priv->status); 3018 clear_bit(STATUS_SCANNING, &priv->status);
3019 3019
3020 /* inform mac80211 scan aborted */ 3020 /* inform mac80211 scan aborted */
3021 queue_work(priv->workqueue, &priv->scan_completed); 3021 queue_work(priv->workqueue, &priv->abort_scan);
3022} 3022}
3023 3023
3024static void iwl3945_bg_restart(struct work_struct *data) 3024static void iwl3945_bg_restart(struct work_struct *data)
@@ -3391,6 +3391,55 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
3391 3391
3392 return 0; 3392 return 0;
3393} 3393}
3394
3395static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3396 unsigned int changed_flags,
3397 unsigned int *total_flags,
3398 u64 multicast)
3399{
3400 struct iwl_priv *priv = hw->priv;
3401 __le32 filter_or = 0, filter_nand = 0;
3402
3403#define CHK(test, flag) do { \
3404 if (*total_flags & (test)) \
3405 filter_or |= (flag); \
3406 else \
3407 filter_nand |= (flag); \
3408 } while (0)
3409
3410 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
3411 changed_flags, *total_flags);
3412
3413 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
3414 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
3415 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
3416
3417#undef CHK
3418
3419 mutex_lock(&priv->mutex);
3420
3421 priv->staging_rxon.filter_flags &= ~filter_nand;
3422 priv->staging_rxon.filter_flags |= filter_or;
3423
3424 /*
3425 * Committing directly here breaks for some reason,
3426 * but we'll eventually commit the filter flags
3427 * change anyway.
3428 */
3429
3430 mutex_unlock(&priv->mutex);
3431
3432 /*
3433 * Receiving all multicast frames is always enabled by the
3434 * default flags setup in iwl_connection_init_rx_config()
3435 * since we currently do not support programming multicast
3436 * filters into the device.
3437 */
3438 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
3439 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3440}
3441
3442
3394/***************************************************************************** 3443/*****************************************************************************
3395 * 3444 *
3396 * sysfs attributes 3445 * sysfs attributes
@@ -3796,7 +3845,7 @@ static struct ieee80211_ops iwl3945_hw_ops = {
3796 .add_interface = iwl_mac_add_interface, 3845 .add_interface = iwl_mac_add_interface,
3797 .remove_interface = iwl_mac_remove_interface, 3846 .remove_interface = iwl_mac_remove_interface,
3798 .config = iwl_mac_config, 3847 .config = iwl_mac_config,
3799 .configure_filter = iwl_configure_filter, 3848 .configure_filter = iwl3945_configure_filter,
3800 .set_key = iwl3945_mac_set_key, 3849 .set_key = iwl3945_mac_set_key,
3801 .conf_tx = iwl_mac_conf_tx, 3850 .conf_tx = iwl_mac_conf_tx,
3802 .reset_tsf = iwl_mac_reset_tsf, 3851 .reset_tsf = iwl_mac_reset_tsf,
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index ba854c70ab94..87b634978b35 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -128,7 +128,7 @@ struct if_sdio_card {
128 bool helper_allocated; 128 bool helper_allocated;
129 bool firmware_allocated; 129 bool firmware_allocated;
130 130
131 u8 buffer[65536]; 131 u8 buffer[65536] __attribute__((aligned(4)));
132 132
133 spinlock_t lock; 133 spinlock_t lock;
134 struct if_sdio_packet *packets; 134 struct if_sdio_packet *packets;
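The attribute added in the libertas hunk pins the 64 KiB transfer buffer to a 4-byte boundary; a plain u8 array member otherwise has alignment 1 and may land at any offset within the struct, which some SDIO block-transfer paths cannot cope with. A minimal demonstration of the effect (GCC attribute syntax, as used in the kernel):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct xfer {
        uint8_t hdr;                                    /* would normally push buf to offset 1 */
        uint8_t buf[64] __attribute__((aligned(4)));    /* forced onto a 4-byte boundary       */
};

int main(void)
{
        printf("offsetof(buf) = %zu\n", offsetof(struct xfer, buf));    /* 4, not 1 */
        return 0;
}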
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 01ad7f77383a..86fa8abdd66f 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -486,7 +486,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
486 struct ieee80211_rx_status rx_status; 486 struct ieee80211_rx_status rx_status;
487 487
488 if (data->idle) { 488 if (data->idle) {
489 wiphy_debug(hw->wiphy, "trying to tx when idle - reject\n"); 489 wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
490 return false; 490 return false;
491 } 491 }
492 492
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index d761ed2d8af4..f152a25be59f 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -910,14 +910,14 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
910 910
911 rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma); 911 rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma);
912 if (rxq->rxd == NULL) { 912 if (rxq->rxd == NULL) {
913 wiphy_err(hw->wiphy, "failed to alloc rx descriptors\n"); 913 wiphy_err(hw->wiphy, "failed to alloc RX descriptors\n");
914 return -ENOMEM; 914 return -ENOMEM;
915 } 915 }
916 memset(rxq->rxd, 0, size); 916 memset(rxq->rxd, 0, size);
917 917
918 rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL); 918 rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL);
919 if (rxq->buf == NULL) { 919 if (rxq->buf == NULL) {
920 wiphy_err(hw->wiphy, "failed to alloc rx skbuff list\n"); 920 wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
921 pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); 921 pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
922 return -ENOMEM; 922 return -ENOMEM;
923 } 923 }
@@ -1145,14 +1145,14 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
1145 1145
1146 txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma); 1146 txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma);
1147 if (txq->txd == NULL) { 1147 if (txq->txd == NULL) {
1148 wiphy_err(hw->wiphy, "failed to alloc tx descriptors\n"); 1148 wiphy_err(hw->wiphy, "failed to alloc TX descriptors\n");
1149 return -ENOMEM; 1149 return -ENOMEM;
1150 } 1150 }
1151 memset(txq->txd, 0, size); 1151 memset(txq->txd, 0, size);
1152 1152
1153 txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL); 1153 txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
1154 if (txq->skb == NULL) { 1154 if (txq->skb == NULL) {
1155 wiphy_err(hw->wiphy, "failed to alloc tx skbuff list\n"); 1155 wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
1156 pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); 1156 pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
1157 return -ENOMEM; 1157 return -ENOMEM;
1158 } 1158 }
@@ -1573,7 +1573,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1573 PCI_DMA_BIDIRECTIONAL); 1573 PCI_DMA_BIDIRECTIONAL);
1574 1574
1575 if (!timeout) { 1575 if (!timeout) {
1576 wiphy_err(hw->wiphy, "command %s timeout after %u ms\n", 1576 wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n",
1577 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), 1577 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
1578 MWL8K_CMD_TIMEOUT_MS); 1578 MWL8K_CMD_TIMEOUT_MS);
1579 rc = -ETIMEDOUT; 1579 rc = -ETIMEDOUT;
@@ -1584,11 +1584,11 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1584 1584
1585 rc = cmd->result ? -EINVAL : 0; 1585 rc = cmd->result ? -EINVAL : 0;
1586 if (rc) 1586 if (rc)
1587 wiphy_err(hw->wiphy, "command %s error 0x%x\n", 1587 wiphy_err(hw->wiphy, "Command %s error 0x%x\n",
1588 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), 1588 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
1589 le16_to_cpu(cmd->result)); 1589 le16_to_cpu(cmd->result));
1590 else if (ms > 2000) 1590 else if (ms > 2000)
1591 wiphy_notice(hw->wiphy, "command %s took %d ms\n", 1591 wiphy_notice(hw->wiphy, "Command %s took %d ms\n",
1592 mwl8k_cmd_name(cmd->code, 1592 mwl8k_cmd_name(cmd->code,
1593 buf, sizeof(buf)), 1593 buf, sizeof(buf)),
1594 ms); 1594 ms);
@@ -3210,7 +3210,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
3210 rc = request_irq(priv->pdev->irq, mwl8k_interrupt, 3210 rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
3211 IRQF_SHARED, MWL8K_NAME, hw); 3211 IRQF_SHARED, MWL8K_NAME, hw);
3212 if (rc) { 3212 if (rc) {
3213 wiphy_err(hw->wiphy, "failed to register irq handler\n"); 3213 wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
3214 return -EIO; 3214 return -EIO;
3215 } 3215 }
3216 3216
@@ -3926,7 +3926,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3926 3926
3927 priv->sram = pci_iomap(pdev, 0, 0x10000); 3927 priv->sram = pci_iomap(pdev, 0, 0x10000);
3928 if (priv->sram == NULL) { 3928 if (priv->sram == NULL) {
3929 wiphy_err(hw->wiphy, "cannot map device sram\n"); 3929 wiphy_err(hw->wiphy, "Cannot map device SRAM\n");
3930 goto err_iounmap; 3930 goto err_iounmap;
3931 } 3931 }
3932 3932
@@ -3938,7 +3938,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3938 if (priv->regs == NULL) { 3938 if (priv->regs == NULL) {
3939 priv->regs = pci_iomap(pdev, 2, 0x10000); 3939 priv->regs = pci_iomap(pdev, 2, 0x10000);
3940 if (priv->regs == NULL) { 3940 if (priv->regs == NULL) {
3941 wiphy_err(hw->wiphy, "cannot map device registers\n"); 3941 wiphy_err(hw->wiphy, "Cannot map device registers\n");
3942 goto err_iounmap; 3942 goto err_iounmap;
3943 } 3943 }
3944 } 3944 }
@@ -3950,14 +3950,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3950 /* Ask userland hotplug daemon for the device firmware */ 3950 /* Ask userland hotplug daemon for the device firmware */
3951 rc = mwl8k_request_firmware(priv); 3951 rc = mwl8k_request_firmware(priv);
3952 if (rc) { 3952 if (rc) {
3953 wiphy_err(hw->wiphy, "firmware files not found\n"); 3953 wiphy_err(hw->wiphy, "Firmware files not found\n");
3954 goto err_stop_firmware; 3954 goto err_stop_firmware;
3955 } 3955 }
3956 3956
3957 /* Load firmware into hardware */ 3957 /* Load firmware into hardware */
3958 rc = mwl8k_load_firmware(hw); 3958 rc = mwl8k_load_firmware(hw);
3959 if (rc) { 3959 if (rc) {
3960 wiphy_err(hw->wiphy, "cannot start firmware\n"); 3960 wiphy_err(hw->wiphy, "Cannot start firmware\n");
3961 goto err_stop_firmware; 3961 goto err_stop_firmware;
3962 } 3962 }
3963 3963
@@ -4047,7 +4047,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
4047 rc = request_irq(priv->pdev->irq, mwl8k_interrupt, 4047 rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
4048 IRQF_SHARED, MWL8K_NAME, hw); 4048 IRQF_SHARED, MWL8K_NAME, hw);
4049 if (rc) { 4049 if (rc) {
4050 wiphy_err(hw->wiphy, "failed to register irq handler\n"); 4050 wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
4051 goto err_free_queues; 4051 goto err_free_queues;
4052 } 4052 }
4053 4053
@@ -4067,7 +4067,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
4067 rc = mwl8k_cmd_get_hw_spec_sta(hw); 4067 rc = mwl8k_cmd_get_hw_spec_sta(hw);
4068 } 4068 }
4069 if (rc) { 4069 if (rc) {
4070 wiphy_err(hw->wiphy, "cannot initialise firmware\n"); 4070 wiphy_err(hw->wiphy, "Cannot initialise firmware\n");
4071 goto err_free_irq; 4071 goto err_free_irq;
4072 } 4072 }
4073 4073
@@ -4081,14 +4081,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
4081 /* Turn radio off */ 4081 /* Turn radio off */
4082 rc = mwl8k_cmd_radio_disable(hw); 4082 rc = mwl8k_cmd_radio_disable(hw);
4083 if (rc) { 4083 if (rc) {
4084 wiphy_err(hw->wiphy, "cannot disable\n"); 4084 wiphy_err(hw->wiphy, "Cannot disable\n");
4085 goto err_free_irq; 4085 goto err_free_irq;
4086 } 4086 }
4087 4087
4088 /* Clear MAC address */ 4088 /* Clear MAC address */
4089 rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00"); 4089 rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
4090 if (rc) { 4090 if (rc) {
4091 wiphy_err(hw->wiphy, "cannot clear mac address\n"); 4091 wiphy_err(hw->wiphy, "Cannot clear MAC address\n");
4092 goto err_free_irq; 4092 goto err_free_irq;
4093 } 4093 }
4094 4094
@@ -4098,7 +4098,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
4098 4098
4099 rc = ieee80211_register_hw(hw); 4099 rc = ieee80211_register_hw(hw);
4100 if (rc) { 4100 if (rc) {
4101 wiphy_err(hw->wiphy, "cannot register device\n"); 4101 wiphy_err(hw->wiphy, "Cannot register device\n");
4102 goto err_free_queues; 4102 goto err_free_queues;
4103 } 4103 }
4104 4104
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index d687cb7f2a59..78347041ec40 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -167,7 +167,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
167 } 167 }
168 168
169 if (j == 0) { 169 if (j == 0) {
170 wiphy_err(dev->wiphy, "disabling totally damaged %d GHz band\n", 170 wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n",
171 (band == IEEE80211_BAND_2GHZ) ? 2 : 5); 171 (band == IEEE80211_BAND_2GHZ) ? 2 : 5);
172 172
173 ret = -ENODATA; 173 ret = -ENODATA;
@@ -695,12 +695,12 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
695 u8 perm_addr[ETH_ALEN]; 695 u8 perm_addr[ETH_ALEN];
696 696
697 wiphy_warn(dev->wiphy, 697 wiphy_warn(dev->wiphy,
698 "invalid hwaddr! using randomly generated mac addr\n"); 698 "Invalid hwaddr! Using randomly generated MAC addr\n");
699 random_ether_addr(perm_addr); 699 random_ether_addr(perm_addr);
700 SET_IEEE80211_PERM_ADDR(dev, perm_addr); 700 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
701 } 701 }
702 702
703 wiphy_info(dev->wiphy, "hwaddr %pm, mac:isl38%02x rf:%s\n", 703 wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n",
704 dev->wiphy->perm_addr, priv->version, 704 dev->wiphy->perm_addr, priv->version,
705 p54_rf_chips[priv->rxhw]); 705 p54_rf_chips[priv->rxhw]);
706 706
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 47006bca4852..15b20c29a604 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
125 125
126 if (fw_version) 126 if (fw_version)
127 wiphy_info(priv->hw->wiphy, 127 wiphy_info(priv->hw->wiphy,
128 "fw rev %s - softmac protocol %x.%x\n", 128 "FW rev %s - Softmac protocol %x.%x\n",
129 fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); 129 fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
130 130
131 if (priv->fw_var < 0x500) 131 if (priv->fw_var < 0x500)
diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/p54/led.c
index ea91f5cce6b3..3837e1eec5f4 100644
--- a/drivers/net/wireless/p54/led.c
+++ b/drivers/net/wireless/p54/led.c
@@ -58,7 +58,7 @@ static void p54_update_leds(struct work_struct *work)
58 err = p54_set_leds(priv); 58 err = p54_set_leds(priv);
59 if (err && net_ratelimit()) 59 if (err && net_ratelimit())
60 wiphy_err(priv->hw->wiphy, 60 wiphy_err(priv->hw->wiphy,
61 "failed to update leds (%d).\n", err); 61 "failed to update LEDs (%d).\n", err);
62 62
63 if (rerun) 63 if (rerun)
64 ieee80211_queue_delayed_work(priv->hw, &priv->led_work, 64 ieee80211_queue_delayed_work(priv->hw, &priv->led_work,
@@ -103,7 +103,7 @@ static int p54_register_led(struct p54_common *priv,
103 err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev); 103 err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev);
104 if (err) 104 if (err)
105 wiphy_err(priv->hw->wiphy, 105 wiphy_err(priv->hw->wiphy,
106 "failed to register %s led.\n", name); 106 "Failed to register %s LED.\n", name);
107 else 107 else
108 led->registered = 1; 108 led->registered = 1;
109 109
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 822f8dc26e9c..1eacba4daa5b 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -466,7 +466,7 @@ static int p54p_open(struct ieee80211_hw *dev)
466 P54P_READ(dev_int); 466 P54P_READ(dev_int);
467 467
468 if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { 468 if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
469 wiphy_err(dev->wiphy, "cannot boot firmware!\n"); 469 wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
470 p54p_stop(dev); 470 p54p_stop(dev);
471 return -ETIMEDOUT; 471 return -ETIMEDOUT;
472 } 472 }
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 427b46f558ed..0e937dc0c9c4 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -446,7 +446,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
446 } 446 }
447 447
448 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && 448 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
449 (!payload->status)) 449 !(payload->status & P54_TX_FAILED))
450 info->flags |= IEEE80211_TX_STAT_ACK; 450 info->flags |= IEEE80211_TX_STAT_ACK;
451 if (payload->status & P54_TX_PSM_CANCELLED) 451 if (payload->status & P54_TX_PSM_CANCELLED)
452 info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 452 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
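
Note: the txrx.c hunk above stops treating any non-zero status word as a lost frame and instead tests only the explicit failure bit, so frames whose status carries unrelated informational bits are still reported as acknowledged. A minimal sketch of the difference; the bit values below are made up for the example and are not the real p54 definitions.

/* old check vs. new check on a status word that has only an
 * informational bit set (frame was ACKed but later filtered) */
#include <stdio.h>

#define TX_FAILED        0x01   /* assumed failure bit */
#define TX_PSM_CANCELLED 0x04   /* assumed informational bit */

int main(void)
{
        unsigned int status = TX_PSM_CANCELLED;

        /* old: any non-zero status was treated as "no ACK" */
        printf("old: acked=%d\n", status == 0);

        /* new: only the explicit failure bit clears the ACK flag */
        printf("new: acked=%d\n", !(status & TX_FAILED));
        return 0;
}
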
@@ -540,7 +540,7 @@ static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb)
540 case P54_TRAP_BEACON_TX: 540 case P54_TRAP_BEACON_TX:
541 break; 541 break;
542 case P54_TRAP_RADAR: 542 case P54_TRAP_RADAR:
543 wiphy_info(priv->hw->wiphy, "radar (freq:%d mhz)\n", freq); 543 wiphy_info(priv->hw->wiphy, "radar (freq:%d MHz)\n", freq);
544 break; 544 break;
545 case P54_TRAP_NO_BEACON: 545 case P54_TRAP_NO_BEACON:
546 if (priv->vif) 546 if (priv->vif)
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index b50c39aaec05..30107ce78dfb 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -445,7 +445,7 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
445 &priv->rx_ring_dma); 445 &priv->rx_ring_dma);
446 446
447 if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) { 447 if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
448 wiphy_err(dev->wiphy, "cannot allocate rx ring\n"); 448 wiphy_err(dev->wiphy, "Cannot allocate RX ring\n");
449 return -ENOMEM; 449 return -ENOMEM;
450 } 450 }
451 451
@@ -502,7 +502,7 @@ static int rtl8180_init_tx_ring(struct ieee80211_hw *dev,
502 502
503 ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma); 503 ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
504 if (!ring || (unsigned long)ring & 0xFF) { 504 if (!ring || (unsigned long)ring & 0xFF) {
505 wiphy_err(dev->wiphy, "cannot allocate tx ring (prio = %d)\n", 505 wiphy_err(dev->wiphy, "Cannot allocate TX ring (prio = %d)\n",
506 prio); 506 prio);
507 return -ENOMEM; 507 return -ENOMEM;
508 } 508 }
@@ -568,7 +568,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
568 ret = request_irq(priv->pdev->irq, rtl8180_interrupt, 568 ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
569 IRQF_SHARED, KBUILD_MODNAME, dev); 569 IRQF_SHARED, KBUILD_MODNAME, dev);
570 if (ret) { 570 if (ret) {
571 wiphy_err(dev->wiphy, "failed to register irq handler\n"); 571 wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
572 goto err_free_rings; 572 goto err_free_rings;
573 } 573 }
574 574
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 5738a55c1b06..98e0351c1dd6 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -573,7 +573,7 @@ static int rtl8187_cmd_reset(struct ieee80211_hw *dev)
573 } while (--i); 573 } while (--i);
574 574
575 if (!i) { 575 if (!i) {
576 wiphy_err(dev->wiphy, "reset timeout!\n"); 576 wiphy_err(dev->wiphy, "Reset timeout!\n");
577 return -ETIMEDOUT; 577 return -ETIMEDOUT;
578 } 578 }
579 579
@@ -1526,7 +1526,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1526 mutex_init(&priv->conf_mutex); 1526 mutex_init(&priv->conf_mutex);
1527 skb_queue_head_init(&priv->b_tx_status.queue); 1527 skb_queue_head_init(&priv->b_tx_status.queue);
1528 1528
1529 wiphy_info(dev->wiphy, "hwaddr %pm, %s v%d + %s, rfkill mask %d\n", 1529 wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
1530 mac_addr, chip_name, priv->asic_rev, priv->rf->name, 1530 mac_addr, chip_name, priv->asic_rev, priv->rf->name,
1531 priv->rfkill_mask); 1531 priv->rfkill_mask);
1532 1532
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
index fd96f9112322..97eebdcf7eb9 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
@@ -366,7 +366,7 @@ static void rtl8225_rf_init(struct ieee80211_hw *dev)
366 rtl8225_write(dev, 0x02, 0x044d); 366 rtl8225_write(dev, 0x02, 0x044d);
367 msleep(100); 367 msleep(100);
368 if (!(rtl8225_read(dev, 6) & (1 << 7))) 368 if (!(rtl8225_read(dev, 6) & (1 << 7)))
369 wiphy_warn(dev->wiphy, "rf calibration failed! %x\n", 369 wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n",
370 rtl8225_read(dev, 6)); 370 rtl8225_read(dev, 6));
371 } 371 }
372 372
@@ -735,7 +735,7 @@ static void rtl8225z2_rf_init(struct ieee80211_hw *dev)
735 rtl8225_write(dev, 0x02, 0x044D); 735 rtl8225_write(dev, 0x02, 0x044D);
736 msleep(100); 736 msleep(100);
737 if (!(rtl8225_read(dev, 6) & (1 << 7))) 737 if (!(rtl8225_read(dev, 6) & (1 << 7)))
738 wiphy_warn(dev->wiphy, "rf calibration failed! %x\n", 738 wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n",
739 rtl8225_read(dev, 6)); 739 rtl8225_read(dev, 6));
740 } 740 }
741 741
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index a9352b2c7ac4..b7e755f4178a 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = {
141 .notifier_call = module_load_notify, 141 .notifier_call = module_load_notify,
142}; 142};
143 143
144
145static void end_sync(void)
146{
147 end_cpu_work();
148 /* make sure we don't leak task structs */
149 process_task_mortuary();
150 process_task_mortuary();
151}
152
153
154int sync_start(void) 144int sync_start(void)
155{ 145{
156 int err; 146 int err;
@@ -158,7 +148,7 @@ int sync_start(void)
158 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) 148 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
159 return -ENOMEM; 149 return -ENOMEM;
160 150
161 start_cpu_work(); 151 mutex_lock(&buffer_mutex);
162 152
163 err = task_handoff_register(&task_free_nb); 153 err = task_handoff_register(&task_free_nb);
164 if (err) 154 if (err)
@@ -173,7 +163,10 @@ int sync_start(void)
173 if (err) 163 if (err)
174 goto out4; 164 goto out4;
175 165
166 start_cpu_work();
167
176out: 168out:
169 mutex_unlock(&buffer_mutex);
177 return err; 170 return err;
178out4: 171out4:
179 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 172 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@ out3:
182out2: 175out2:
183 task_handoff_unregister(&task_free_nb); 176 task_handoff_unregister(&task_free_nb);
184out1: 177out1:
185 end_sync();
186 free_cpumask_var(marked_cpus); 178 free_cpumask_var(marked_cpus);
187 goto out; 179 goto out;
188} 180}
@@ -190,11 +182,20 @@ out1:
190 182
191void sync_stop(void) 183void sync_stop(void)
192{ 184{
185 /* flush buffers */
186 mutex_lock(&buffer_mutex);
187 end_cpu_work();
193 unregister_module_notifier(&module_load_nb); 188 unregister_module_notifier(&module_load_nb);
194 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 189 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
195 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); 190 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
196 task_handoff_unregister(&task_free_nb); 191 task_handoff_unregister(&task_free_nb);
197 end_sync(); 192 mutex_unlock(&buffer_mutex);
193 flush_scheduled_work();
194
195 /* make sure we don't leak task structs */
196 process_task_mortuary();
197 process_task_mortuary();
198
198 free_cpumask_var(marked_cpus); 199 free_cpumask_var(marked_cpus);
199} 200}
200 201
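
Note: the buffer_sync.c reordering above cancels the per-CPU work while buffer_mutex is held but calls flush_scheduled_work() only after the mutex is dropped, presumably because the queued work itself needs the same mutex. A small userspace analog of that ordering; the pthread names are illustrative, not oprofile's.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&buffer_lock);   /* like sync_buffer() taking buffer_mutex */
        puts("worker flushed buffers");
        pthread_mutex_unlock(&buffer_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);

        pthread_mutex_lock(&buffer_lock);
        puts("teardown: unregistering under the lock");
        pthread_mutex_unlock(&buffer_lock);

        /* join (the analog of flush_scheduled_work()) only after unlocking;
         * joining while still holding buffer_lock could wait forever */
        pthread_join(t, NULL);
        puts("teardown complete");
        return 0;
}
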
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 219f79e2210a..f179ac2ea801 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -120,8 +120,6 @@ void end_cpu_work(void)
120 120
121 cancel_delayed_work(&b->work); 121 cancel_delayed_work(&b->work);
122 } 122 }
123
124 flush_scheduled_work();
125} 123}
126 124
127/* 125/*
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 45fcc1e96df9..3bc72d18b121 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -338,9 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
338 acpi_handle chandle, handle; 338 acpi_handle chandle, handle;
339 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; 339 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
340 340
341 flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | 341 flags &= OSC_SHPC_NATIVE_HP_CONTROL;
342 OSC_SHPC_NATIVE_HP_CONTROL |
343 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
344 if (!flags) { 342 if (!flags) {
345 err("Invalid flags %u specified!\n", flags); 343 err("Invalid flags %u specified!\n", flags);
346 return -EINVAL; 344 return -EINVAL;
@@ -360,7 +358,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
360 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); 358 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
361 dbg("Trying to get hotplug control for %s\n", 359 dbg("Trying to get hotplug control for %s\n",
362 (char *)string.pointer); 360 (char *)string.pointer);
363 status = acpi_pci_osc_control_set(handle, flags); 361 status = acpi_pci_osc_control_set(handle, &flags, flags);
364 if (ACPI_SUCCESS(status)) 362 if (ACPI_SUCCESS(status))
365 goto got_one; 363 goto got_one;
366 if (status == AE_SUPPORT) 364 if (status == AE_SUPPORT)
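
Note: acpi_pci_osc_control_set() now takes the requested control bits by reference plus a separate mask of bits that must be granted; on success the referenced word appears to be updated with the bits the platform is willing to yield. A hedged stand-alone model of that calling convention; the stub "firmware" and bit values below are invented for the example and are not the ACPI code.

#include <stdio.h>

#define OSC_HP_CONTROL  0x01
#define OSC_PME_CONTROL 0x02
#define OSC_AER_CONTROL 0x04

static int osc_control_set(unsigned int *mask, unsigned int required)
{
        unsigned int granted = *mask & OSC_HP_CONTROL;  /* pretend firmware grants HP only */

        if ((granted & required) != required)
                return -1;                              /* a required bit was refused */
        *mask = granted;
        return 0;
}

int main(void)
{
        unsigned int flags = OSC_HP_CONTROL | OSC_PME_CONTROL | OSC_AER_CONTROL;

        if (osc_control_set(&flags, OSC_HP_CONTROL) == 0)
                printf("granted control bits: 0x%x\n", flags);
        else
                printf("firmware kept control\n");
        return 0;
}
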
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 4ed76b47b6dc..73d513989263 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -176,19 +176,11 @@ static inline void pciehp_firmware_init(void)
176{ 176{
177 pciehp_acpi_slot_detection_init(); 177 pciehp_acpi_slot_detection_init();
178} 178}
179
180static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
181{
182 int retval;
183 u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
184 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
185 retval = acpi_get_hp_hw_control_from_firmware(dev, flags);
186 if (retval)
187 return retval;
188 return pciehp_acpi_slot_detection_check(dev);
189}
190#else 179#else
191#define pciehp_firmware_init() do {} while (0) 180#define pciehp_firmware_init() do {} while (0)
192#define pciehp_get_hp_hw_control_from_firmware(dev) 0 181static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
182{
183 return 0;
184}
193#endif /* CONFIG_ACPI */ 185#endif /* CONFIG_ACPI */
194#endif /* _PCIEHP_H */ 186#endif /* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 1f4000a5a108..2574700db461 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -85,9 +85,7 @@ static int __init dummy_probe(struct pcie_device *dev)
85 acpi_handle handle; 85 acpi_handle handle;
86 struct dummy_slot *slot, *tmp; 86 struct dummy_slot *slot, *tmp;
87 struct pci_dev *pdev = dev->port; 87 struct pci_dev *pdev = dev->port;
88 /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ 88
89 if (pciehp_get_hp_hw_control_from_firmware(pdev))
90 return -ENODEV;
91 pos = pci_pcie_cap(pdev); 89 pos = pci_pcie_cap(pdev);
92 if (!pos) 90 if (!pos)
93 return -ENODEV; 91 return -ENODEV;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 3588ea61b0dd..aa5f3ff629ff 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -59,7 +59,7 @@ module_param(pciehp_force, bool, 0644);
59MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); 59MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
60MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); 60MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
61MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); 61MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
62MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing"); 62MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing");
63 63
64#define PCIE_MODULE_NAME "pciehp" 64#define PCIE_MODULE_NAME "pciehp"
65 65
@@ -235,7 +235,7 @@ static int pciehp_probe(struct pcie_device *dev)
235 dev_info(&dev->device, 235 dev_info(&dev->device,
236 "Bypassing BIOS check for pciehp use on %s\n", 236 "Bypassing BIOS check for pciehp use on %s\n",
237 pci_name(dev->port)); 237 pci_name(dev->port));
238 else if (pciehp_get_hp_hw_control_from_firmware(dev->port)) 238 else if (pciehp_acpi_slot_detection_check(dev->port))
239 goto err_out_none; 239 goto err_out_none;
240 240
241 ctrl = pcie_init(dev); 241 ctrl = pcie_init(dev);
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c3ceebb5be84..4789f8e8bf7a 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -71,6 +71,49 @@
71#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) 71#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
72#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) 72#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
73 73
74/* page table handling */
75#define LEVEL_STRIDE (9)
76#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
77
78static inline int agaw_to_level(int agaw)
79{
80 return agaw + 2;
81}
82
83static inline int agaw_to_width(int agaw)
84{
85 return 30 + agaw * LEVEL_STRIDE;
86}
87
88static inline int width_to_agaw(int width)
89{
90 return (width - 30) / LEVEL_STRIDE;
91}
92
93static inline unsigned int level_to_offset_bits(int level)
94{
95 return (level - 1) * LEVEL_STRIDE;
96}
97
98static inline int pfn_level_offset(unsigned long pfn, int level)
99{
100 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
101}
102
103static inline unsigned long level_mask(int level)
104{
105 return -1UL << level_to_offset_bits(level);
106}
107
108static inline unsigned long level_size(int level)
109{
110 return 1UL << level_to_offset_bits(level);
111}
112
113static inline unsigned long align_to_level(unsigned long pfn, int level)
114{
115 return (pfn + level_size(level) - 1) & level_mask(level);
116}
74 117
75/* VT-d pages must always be _smaller_ than MM pages. Otherwise things 118/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
76 are never going to work. */ 119 are never going to work. */
@@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova)
434} 477}
435 478
436 479
437static inline int width_to_agaw(int width);
438
439static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) 480static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
440{ 481{
441 unsigned long sagaw; 482 unsigned long sagaw;
@@ -646,51 +687,6 @@ out:
646 spin_unlock_irqrestore(&iommu->lock, flags); 687 spin_unlock_irqrestore(&iommu->lock, flags);
647} 688}
648 689
649/* page table handling */
650#define LEVEL_STRIDE (9)
651#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
652
653static inline int agaw_to_level(int agaw)
654{
655 return agaw + 2;
656}
657
658static inline int agaw_to_width(int agaw)
659{
660 return 30 + agaw * LEVEL_STRIDE;
661
662}
663
664static inline int width_to_agaw(int width)
665{
666 return (width - 30) / LEVEL_STRIDE;
667}
668
669static inline unsigned int level_to_offset_bits(int level)
670{
671 return (level - 1) * LEVEL_STRIDE;
672}
673
674static inline int pfn_level_offset(unsigned long pfn, int level)
675{
676 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
677}
678
679static inline unsigned long level_mask(int level)
680{
681 return -1UL << level_to_offset_bits(level);
682}
683
684static inline unsigned long level_size(int level)
685{
686 return 1UL << level_to_offset_bits(level);
687}
688
689static inline unsigned long align_to_level(unsigned long pfn, int level)
690{
691 return (pfn + level_size(level) - 1) & level_mask(level);
692}
693
694static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, 690static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
695 unsigned long pfn) 691 unsigned long pfn)
696{ 692{
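
Note: the page-table helpers moved above encode a 9-bit index per level, so agaw 2 corresponds to a 4-level, 48-bit address width, and each level extracts nine bits of the page frame number. A small worked example, using a user-space copy of the helpers shown in the hunk.

#include <stdio.h>

#define LEVEL_STRIDE (9)
#define LEVEL_MASK   (((unsigned long long)1 << LEVEL_STRIDE) - 1)

static int agaw_to_level(int agaw) { return agaw + 2; }
static int agaw_to_width(int agaw) { return 30 + agaw * LEVEL_STRIDE; }
static unsigned int level_to_offset_bits(int level) { return (level - 1) * LEVEL_STRIDE; }

static int pfn_level_offset(unsigned long pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

int main(void)
{
        unsigned long pfn = 0x12345;

        printf("agaw 2 -> %d levels, %d-bit width\n",
               agaw_to_level(2), agaw_to_width(2));       /* 4 levels, 48 bits */
        printf("pfn 0x%lx: level 1 index %d, level 2 index %d\n",
               pfn, pfn_level_offset(pfn, 1), pfn_level_offset(pfn, 2));
        return 0;
}
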
@@ -3761,6 +3757,33 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3761 3757
3762DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); 3758DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
3763 3759
3760#define GGC 0x52
3761#define GGC_MEMORY_SIZE_MASK (0xf << 8)
3762#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
3763#define GGC_MEMORY_SIZE_1M (0x1 << 8)
3764#define GGC_MEMORY_SIZE_2M (0x3 << 8)
3765#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
3766#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
3767#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
3768#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
3769
3770static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
3771{
3772 unsigned short ggc;
3773
3774 if (pci_read_config_word(dev, GGC, &ggc))
3775 return;
3776
3777 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
3778 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
3779 dmar_map_gfx = 0;
3780 }
3781}
3782DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
3783DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
3784DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
3785DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
3786
3764/* On Tylersburg chipsets, some BIOSes have been known to enable the 3787/* On Tylersburg chipsets, some BIOSes have been known to enable the
3765 ISOCH DMAR unit for the Azalia sound device, but not give it any 3788 ISOCH DMAR unit for the Azalia sound device, but not give it any
3766 TLB entries, which causes it to deadlock. Check for that. We do 3789 TLB entries, which causes it to deadlock. Check for that. We do
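
Note: the Calpella quirk above reads the GGC word from config space and disables graphics translation when the VT-enable bit is clear. A hedged sketch of the decode, with a made-up register value standing in for the real pci_read_config_word() result.

#include <stdio.h>

#define GGC_MEMORY_SIZE_MASK  (0xf << 8)
#define GGC_MEMORY_VT_ENABLED (0x8 << 8)

int main(void)
{
        unsigned short ggc = 0x0130;   /* example: 1M stolen memory, VT bit clear */

        if (!(ggc & GGC_MEMORY_VT_ENABLED))
                puts("no VT-enabled shadow GTT: graphics translation would be disabled");
        else
                printf("shadow GTT present, size field 0x%x\n",
                       (ggc & GGC_MEMORY_SIZE_MASK) >> 8);
        return 0;
}
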
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ce6a3666b3d9..553d8ee55c1c 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -608,7 +608,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
608 * the VF BAR size multiplied by the number of VFs. The alignment 608 * the VF BAR size multiplied by the number of VFs. The alignment
609 * is just the VF BAR size. 609 * is just the VF BAR size.
610 */ 610 */
611int pci_sriov_resource_alignment(struct pci_dev *dev, int resno) 611resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
612{ 612{
613 struct resource tmp; 613 struct resource tmp;
614 enum pci_bar_type type; 614 enum pci_bar_type type;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 679c39de6a89..6beb11b617a9 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -140,8 +140,10 @@ static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
140 140
141#ifdef CONFIG_PCIEAER 141#ifdef CONFIG_PCIEAER
142void pci_no_aer(void); 142void pci_no_aer(void);
143bool pci_aer_available(void);
143#else 144#else
144static inline void pci_no_aer(void) { } 145static inline void pci_no_aer(void) { }
146static inline bool pci_aer_available(void) { return false; }
145#endif 147#endif
146 148
147static inline int pci_no_d1d2(struct pci_dev *dev) 149static inline int pci_no_d1d2(struct pci_dev *dev)
@@ -262,7 +264,8 @@ extern int pci_iov_init(struct pci_dev *dev);
262extern void pci_iov_release(struct pci_dev *dev); 264extern void pci_iov_release(struct pci_dev *dev);
263extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, 265extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
264 enum pci_bar_type *type); 266 enum pci_bar_type *type);
265extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno); 267extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
268 int resno);
266extern void pci_restore_iov_state(struct pci_dev *dev); 269extern void pci_restore_iov_state(struct pci_dev *dev);
267extern int pci_iov_bus_range(struct pci_bus *bus); 270extern int pci_iov_bus_range(struct pci_bus *bus);
268 271
@@ -318,7 +321,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
318} 321}
319#endif /* CONFIG_PCI_IOV */ 322#endif /* CONFIG_PCI_IOV */
320 323
321static inline int pci_resource_alignment(struct pci_dev *dev, 324static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
322 struct resource *res) 325 struct resource *res)
323{ 326{
324#ifdef CONFIG_PCI_IOV 327#ifdef CONFIG_PCI_IOV
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index ea654545e7c4..00c62df5a9fc 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -6,10 +6,11 @@
6obj-$(CONFIG_PCIEASPM) += aspm.o 6obj-$(CONFIG_PCIEASPM) += aspm.o
7 7
8pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o 8pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o
9pcieportdrv-$(CONFIG_ACPI) += portdrv_acpi.o
9 10
10obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o 11obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
11 12
12# Build PCI Express AER if needed 13# Build PCI Express AER if needed
13obj-$(CONFIG_PCIEAER) += aer/ 14obj-$(CONFIG_PCIEAER) += aer/
14 15
15obj-$(CONFIG_PCIE_PME) += pme/ 16obj-$(CONFIG_PCIE_PME) += pme.o
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 484cc55194b8..f409948e1a9b 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -72,6 +72,11 @@ void pci_no_aer(void)
72 pcie_aer_disable = 1; /* has priority over 'forceload' */ 72 pcie_aer_disable = 1; /* has priority over 'forceload' */
73} 73}
74 74
75bool pci_aer_available(void)
76{
77 return !pcie_aer_disable && pci_msi_enabled();
78}
79
75static int set_device_error_reporting(struct pci_dev *dev, void *data) 80static int set_device_error_reporting(struct pci_dev *dev, void *data)
76{ 81{
77 bool enable = *((bool *)data); 82 bool enable = *((bool *)data);
@@ -411,9 +416,7 @@ static void aer_error_resume(struct pci_dev *dev)
411 */ 416 */
412static int __init aer_service_init(void) 417static int __init aer_service_init(void)
413{ 418{
414 if (pcie_aer_disable) 419 if (!pci_aer_available())
415 return -ENXIO;
416 if (!pci_msi_enabled())
417 return -ENXIO; 420 return -ENXIO;
418 return pcie_port_service_register(&aerdriver); 421 return pcie_port_service_register(&aerdriver);
419} 422}
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index f278d7b0d95d..2bb9b8972211 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -19,42 +19,6 @@
19#include <acpi/apei.h> 19#include <acpi/apei.h>
20#include "aerdrv.h" 20#include "aerdrv.h"
21 21
22/**
23 * aer_osc_setup - run ACPI _OSC method
24 * @pciedev: pcie_device which AER is being enabled on
25 *
26 * @return: Zero on success. Nonzero otherwise.
27 *
28 * Invoked when PCIe bus loads AER service driver. To avoid conflict with
29 * BIOS AER support requires BIOS to yield AER control to OS native driver.
30 **/
31int aer_osc_setup(struct pcie_device *pciedev)
32{
33 acpi_status status = AE_NOT_FOUND;
34 struct pci_dev *pdev = pciedev->port;
35 acpi_handle handle = NULL;
36
37 if (acpi_pci_disabled)
38 return -1;
39
40 handle = acpi_find_root_bridge_handle(pdev);
41 if (handle) {
42 status = acpi_pci_osc_control_set(handle,
43 OSC_PCI_EXPRESS_AER_CONTROL |
44 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
45 }
46
47 if (ACPI_FAILURE(status)) {
48 dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't "
49 "init device: %s\n",
50 (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
51 "no _OSC support" : "_OSC failed");
52 return -1;
53 }
54
55 return 0;
56}
57
58#ifdef CONFIG_ACPI_APEI 22#ifdef CONFIG_ACPI_APEI
59static inline int hest_match_pci(struct acpi_hest_aer_common *p, 23static inline int hest_match_pci(struct acpi_hest_aer_common *p,
60 struct pci_dev *pci) 24 struct pci_dev *pci)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index fc0b5a93e1de..29e268fadf14 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -772,22 +772,10 @@ void aer_isr(struct work_struct *work)
772 */ 772 */
773int aer_init(struct pcie_device *dev) 773int aer_init(struct pcie_device *dev)
774{ 774{
775 if (pcie_aer_get_firmware_first(dev->port)) {
776 dev_printk(KERN_DEBUG, &dev->device,
777 "PCIe errors handled by platform firmware.\n");
778 goto out;
779 }
780
781 if (aer_osc_setup(dev))
782 goto out;
783
784 return 0;
785out:
786 if (forceload) { 775 if (forceload) {
787 dev_printk(KERN_DEBUG, &dev->device, 776 dev_printk(KERN_DEBUG, &dev->device,
788 "aerdrv forceload requested.\n"); 777 "aerdrv forceload requested.\n");
789 pcie_aer_force_firmware_first(dev->port, 0); 778 pcie_aer_force_firmware_first(dev->port, 0);
790 return 0;
791 } 779 }
792 return -ENXIO; 780 return 0;
793} 781}
diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme.c
index bbdea18693d9..2f3c90407227 100644
--- a/drivers/pci/pcie/pme/pcie_pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -23,38 +23,13 @@
23#include <linux/pci-acpi.h> 23#include <linux/pci-acpi.h>
24#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
25 25
26#include "../../pci.h" 26#include "../pci.h"
27#include "pcie_pme.h" 27#include "portdrv.h"
28 28
29#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */ 29#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
30#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */ 30#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
31 31
32/* 32/*
33 * If set, this switch will prevent the PCIe root port PME service driver from
34 * being registered. Consequently, the interrupt-based PCIe PME signaling will
35 * not be used by any PCIe root ports in that case.
36 */
37static bool pcie_pme_disabled = true;
38
39/*
40 * The PCI Express Base Specification 2.0, Section 6.1.8, states the following:
41 * "In order to maintain compatibility with non-PCI Express-aware system
42 * software, system power management logic must be configured by firmware to use
43 * the legacy mechanism of signaling PME by default. PCI Express-aware system
44 * software must notify the firmware prior to enabling native, interrupt-based
45 * PME signaling." However, if the platform doesn't provide us with a suitable
46 * notification mechanism or the notification fails, it is not clear whether or
47 * not we are supposed to use the interrupt-based PCIe PME signaling. The
48 * switch below can be used to indicate the desired behaviour. When set, it
49 * will make the kernel use the interrupt-based PCIe PME signaling regardless of
50 * the platform notification status, although the kernel will attempt to notify
51 * the platform anyway. When unset, it will prevent the kernel from using the
52 * the interrupt-based PCIe PME signaling if the platform notification fails,
53 * which is the default.
54 */
55static bool pcie_pme_force_enable;
56
57/*
58 * If this switch is set, MSI will not be used for PCIe PME signaling. This 33 * If this switch is set, MSI will not be used for PCIe PME signaling. This
59 * causes the PCIe port driver to use INTx interrupts only, but it turns out 34 * causes the PCIe port driver to use INTx interrupts only, but it turns out
60 * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based 35 * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
@@ -64,38 +39,13 @@ bool pcie_pme_msi_disabled;
64 39
65static int __init pcie_pme_setup(char *str) 40static int __init pcie_pme_setup(char *str)
66{ 41{
67 if (!strncmp(str, "auto", 4)) 42 if (!strncmp(str, "nomsi", 5))
68 pcie_pme_disabled = false; 43 pcie_pme_msi_disabled = true;
69 else if (!strncmp(str, "force", 5))
70 pcie_pme_force_enable = true;
71
72 str = strchr(str, ',');
73 if (str) {
74 str++;
75 str += strspn(str, " \t");
76 if (*str && !strcmp(str, "nomsi"))
77 pcie_pme_msi_disabled = true;
78 }
79 44
80 return 1; 45 return 1;
81} 46}
82__setup("pcie_pme=", pcie_pme_setup); 47__setup("pcie_pme=", pcie_pme_setup);
83 48
84/**
85 * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME.
86 * @srv: PCIe PME root port service to use for carrying out the check.
87 *
88 * Notify the platform that the native PCIe PME is going to be used and return
89 * 'true' if the control of the PCIe PME registers has been acquired from the
90 * platform.
91 */
92static bool pcie_pme_platform_setup(struct pcie_device *srv)
93{
94 if (!pcie_pme_platform_notify(srv))
95 return true;
96 return pcie_pme_force_enable;
97}
98
99struct pcie_pme_service_data { 49struct pcie_pme_service_data {
100 spinlock_t lock; 50 spinlock_t lock;
101 struct pcie_device *srv; 51 struct pcie_device *srv;
@@ -108,7 +58,7 @@ struct pcie_pme_service_data {
108 * @dev: PCIe root port or event collector. 58 * @dev: PCIe root port or event collector.
109 * @enable: Enable or disable the interrupt. 59 * @enable: Enable or disable the interrupt.
110 */ 60 */
111static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) 61void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
112{ 62{
113 int rtctl_pos; 63 int rtctl_pos;
114 u16 rtctl; 64 u16 rtctl;
@@ -417,9 +367,6 @@ static int pcie_pme_probe(struct pcie_device *srv)
417 struct pcie_pme_service_data *data; 367 struct pcie_pme_service_data *data;
418 int ret; 368 int ret;
419 369
420 if (!pcie_pme_platform_setup(srv))
421 return -EACCES;
422
423 data = kzalloc(sizeof(*data), GFP_KERNEL); 370 data = kzalloc(sizeof(*data), GFP_KERNEL);
424 if (!data) 371 if (!data)
425 return -ENOMEM; 372 return -ENOMEM;
@@ -509,8 +456,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
509 */ 456 */
510static int __init pcie_pme_service_init(void) 457static int __init pcie_pme_service_init(void)
511{ 458{
512 return pcie_pme_disabled ? 459 return pcie_port_service_register(&pcie_pme_driver);
513 -ENODEV : pcie_port_service_register(&pcie_pme_driver);
514} 460}
515 461
516module_init(pcie_pme_service_init); 462module_init(pcie_pme_service_init);
diff --git a/drivers/pci/pcie/pme/Makefile b/drivers/pci/pcie/pme/Makefile
deleted file mode 100644
index 8b9238053080..000000000000
--- a/drivers/pci/pcie/pme/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
1#
2# Makefile for PCI-Express Root Port PME signaling driver
3#
4
5obj-$(CONFIG_PCIE_PME) += pmedriver.o
6
7pmedriver-objs := pcie_pme.o
8pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o
diff --git a/drivers/pci/pcie/pme/pcie_pme.h b/drivers/pci/pcie/pme/pcie_pme.h
deleted file mode 100644
index b30d2b7c9775..000000000000
--- a/drivers/pci/pcie/pme/pcie_pme.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * drivers/pci/pcie/pme/pcie_pme.h
3 *
4 * PCI Express Root Port PME signaling support
5 *
6 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
7 */
8
9#ifndef _PCIE_PME_H_
10#define _PCIE_PME_H_
11
12struct pcie_device;
13
14#ifdef CONFIG_ACPI
15extern int pcie_pme_acpi_setup(struct pcie_device *srv);
16
17static inline int pcie_pme_platform_notify(struct pcie_device *srv)
18{
19 return pcie_pme_acpi_setup(srv);
20}
21#else /* !CONFIG_ACPI */
22static inline int pcie_pme_platform_notify(struct pcie_device *srv)
23{
24 return 0;
25}
26#endif /* !CONFIG_ACPI */
27
28#endif
diff --git a/drivers/pci/pcie/pme/pcie_pme_acpi.c b/drivers/pci/pcie/pme/pcie_pme_acpi.c
deleted file mode 100644
index 83ab2287ae3f..000000000000
--- a/drivers/pci/pcie/pme/pcie_pme_acpi.c
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * PCIe Native PME support, ACPI-related part
3 *
4 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License V2. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/pci.h>
12#include <linux/kernel.h>
13#include <linux/errno.h>
14#include <linux/acpi.h>
15#include <linux/pci-acpi.h>
16#include <linux/pcieport_if.h>
17
18/**
19 * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME.
20 * @srv - PCIe PME service for a root port or event collector.
21 *
22 * Invoked when the PCIe bus type loads PCIe PME service driver. To avoid
23 * conflict with the BIOS PCIe support requires the BIOS to yield PCIe PME
24 * control to the kernel.
25 */
26int pcie_pme_acpi_setup(struct pcie_device *srv)
27{
28 acpi_status status = AE_NOT_FOUND;
29 struct pci_dev *port = srv->port;
30 acpi_handle handle;
31 int error = 0;
32
33 if (acpi_pci_disabled)
34 return -ENOSYS;
35
36 dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n");
37
38 handle = acpi_find_root_bridge_handle(port);
39 if (!handle)
40 return -EINVAL;
41
42 status = acpi_pci_osc_control_set(handle,
43 OSC_PCI_EXPRESS_PME_CONTROL |
44 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
45 if (ACPI_FAILURE(status)) {
46 dev_info(&port->dev,
47 "Failed to receive control of PCIe PME service: %s\n",
48 (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
49 "no _OSC support" : "ACPI _OSC failed");
50 error = -ENODEV;
51 }
52
53 return error;
54}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 813a5c3427b6..7b5aba0a3291 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -20,6 +20,9 @@
20 20
21#define get_descriptor_id(type, service) (((type - 4) << 4) | service) 21#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
22 22
23extern bool pcie_ports_disabled;
24extern bool pcie_ports_auto;
25
23extern struct bus_type pcie_port_bus_type; 26extern struct bus_type pcie_port_bus_type;
24extern int pcie_port_device_register(struct pci_dev *dev); 27extern int pcie_port_device_register(struct pci_dev *dev);
25#ifdef CONFIG_PM 28#ifdef CONFIG_PM
@@ -30,6 +33,8 @@ extern void pcie_port_device_remove(struct pci_dev *dev);
30extern int __must_check pcie_port_bus_register(void); 33extern int __must_check pcie_port_bus_register(void);
31extern void pcie_port_bus_unregister(void); 34extern void pcie_port_bus_unregister(void);
32 35
36struct pci_dev;
37
33#ifdef CONFIG_PCIE_PME 38#ifdef CONFIG_PCIE_PME
34extern bool pcie_pme_msi_disabled; 39extern bool pcie_pme_msi_disabled;
35 40
@@ -42,9 +47,26 @@ static inline bool pcie_pme_no_msi(void)
42{ 47{
43 return pcie_pme_msi_disabled; 48 return pcie_pme_msi_disabled;
44} 49}
50
51extern void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable);
45#else /* !CONFIG_PCIE_PME */ 52#else /* !CONFIG_PCIE_PME */
46static inline void pcie_pme_disable_msi(void) {} 53static inline void pcie_pme_disable_msi(void) {}
47static inline bool pcie_pme_no_msi(void) { return false; } 54static inline bool pcie_pme_no_msi(void) { return false; }
55static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
48#endif /* !CONFIG_PCIE_PME */ 56#endif /* !CONFIG_PCIE_PME */
49 57
58#ifdef CONFIG_ACPI
59extern int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
60
61static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
62{
63 return pcie_port_acpi_setup(port, mask);
64}
65#else /* !CONFIG_ACPI */
66static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
67{
68 return 0;
69}
70#endif /* !CONFIG_ACPI */
71
50#endif /* _PORTDRV_H_ */ 72#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
new file mode 100644
index 000000000000..b7c4cb1ccb23
--- /dev/null
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -0,0 +1,77 @@
1/*
2 * PCIe Port Native Services Support, ACPI-Related Part
3 *
4 * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License V2. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/pci.h>
12#include <linux/kernel.h>
13#include <linux/errno.h>
14#include <linux/acpi.h>
15#include <linux/pci-acpi.h>
16#include <linux/pcieport_if.h>
17
18#include "aer/aerdrv.h"
19#include "../pci.h"
20
21/**
22 * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
23 * @port: PCIe Port service for a root port or event collector.
24 * @srv_mask: Bit mask of services that can be enabled for @port.
25 *
26 * Invoked when @port is identified as a PCIe port device. To avoid conflicts
27 * with the BIOS PCIe port native services support requires the BIOS to yield
28 * control of these services to the kernel. The mask of services that the BIOS
29 * allows to be enabled for @port is written to @srv_mask.
30 *
31 * NOTE: It turns out that we cannot do that for individual port services
32 * separately, because that would make some systems work incorrectly.
33 */
34int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
35{
36 acpi_status status;
37 acpi_handle handle;
38 u32 flags;
39
40 if (acpi_pci_disabled)
41 return 0;
42
43 handle = acpi_find_root_bridge_handle(port);
44 if (!handle)
45 return -EINVAL;
46
47 flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
48 | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
49 | OSC_PCI_EXPRESS_PME_CONTROL;
50
51 if (pci_aer_available()) {
52 if (pcie_aer_get_firmware_first(port))
53 dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
54 else
55 flags |= OSC_PCI_EXPRESS_AER_CONTROL;
56 }
57
58 status = acpi_pci_osc_control_set(handle, &flags,
59 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
60 if (ACPI_FAILURE(status)) {
61 dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n",
62 status);
63 return -ENODEV;
64 }
65
66 dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags);
67
68 *srv_mask = PCIE_PORT_SERVICE_VC;
69 if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
70 *srv_mask |= PCIE_PORT_SERVICE_HP;
71 if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
72 *srv_mask |= PCIE_PORT_SERVICE_PME;
73 if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
74 *srv_mask |= PCIE_PORT_SERVICE_AER;
75
76 return 0;
77}
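
Note: pcie_port_acpi_setup() above translates whatever _OSC control bits the platform grants into a mask of port services that may be enabled, with VC always permitted. A simplified stand-alone model of that mapping; the bit values are invented here, while the real constants are the OSC_* and PCIE_PORT_SERVICE_* definitions.

#include <stdio.h>

#define OSC_HP  0x01
#define OSC_PME 0x02
#define OSC_AER 0x04

#define SRV_VC  0x01
#define SRV_HP  0x02
#define SRV_PME 0x04
#define SRV_AER 0x08

static int granted_to_services(unsigned int flags)
{
        int mask = SRV_VC;                      /* VC needs no platform permission */

        if (flags & OSC_HP)
                mask |= SRV_HP;
        if (flags & OSC_PME)
                mask |= SRV_PME;
        if (flags & OSC_AER)
                mask |= SRV_AER;
        return mask;
}

int main(void)
{
        printf("granted HP+PME -> service mask 0x%x\n",
               granted_to_services(OSC_HP | OSC_PME));
        return 0;
}
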
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e73effbe402c..a9c222d79ebc 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -14,6 +14,8 @@
14#include <linux/string.h> 14#include <linux/string.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/pcieport_if.h> 16#include <linux/pcieport_if.h>
17#include <linux/aer.h>
18#include <linux/pci-aspm.h>
17 19
18#include "../pci.h" 20#include "../pci.h"
19#include "portdrv.h" 21#include "portdrv.h"
@@ -236,24 +238,64 @@ static int get_port_device_capability(struct pci_dev *dev)
236 int services = 0, pos; 238 int services = 0, pos;
237 u16 reg16; 239 u16 reg16;
238 u32 reg32; 240 u32 reg32;
241 int cap_mask;
242 int err;
243
244 err = pcie_port_platform_notify(dev, &cap_mask);
245 if (pcie_ports_auto) {
246 if (err) {
247 pcie_no_aspm();
248 return 0;
249 }
250 } else {
251 cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
252 | PCIE_PORT_SERVICE_VC;
253 if (pci_aer_available())
254 cap_mask |= PCIE_PORT_SERVICE_AER;
255 }
239 256
240 pos = pci_pcie_cap(dev); 257 pos = pci_pcie_cap(dev);
241 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16); 258 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
242 /* Hot-Plug Capable */ 259 /* Hot-Plug Capable */
243 if (reg16 & PCI_EXP_FLAGS_SLOT) { 260 if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) {
244 pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32); 261 pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32);
245 if (reg32 & PCI_EXP_SLTCAP_HPC) 262 if (reg32 & PCI_EXP_SLTCAP_HPC) {
246 services |= PCIE_PORT_SERVICE_HP; 263 services |= PCIE_PORT_SERVICE_HP;
264 /*
265 * Disable hot-plug interrupts in case they have been
266 * enabled by the BIOS and the hot-plug service driver
267 * is not loaded.
268 */
269 pos += PCI_EXP_SLTCTL;
270 pci_read_config_word(dev, pos, &reg16);
271 reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
272 pci_write_config_word(dev, pos, reg16);
273 }
247 } 274 }
248 /* AER capable */ 275 /* AER capable */
249 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) 276 if ((cap_mask & PCIE_PORT_SERVICE_AER)
277 && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) {
250 services |= PCIE_PORT_SERVICE_AER; 278 services |= PCIE_PORT_SERVICE_AER;
279 /*
280 * Disable AER on this port in case it's been enabled by the
281 * BIOS (the AER service driver will enable it when necessary).
282 */
283 pci_disable_pcie_error_reporting(dev);
284 }
251 /* VC support */ 285 /* VC support */
252 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) 286 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
253 services |= PCIE_PORT_SERVICE_VC; 287 services |= PCIE_PORT_SERVICE_VC;
254 /* Root ports are capable of generating PME too */ 288 /* Root ports are capable of generating PME too */
255 if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) 289 if ((cap_mask & PCIE_PORT_SERVICE_PME)
290 && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
256 services |= PCIE_PORT_SERVICE_PME; 291 services |= PCIE_PORT_SERVICE_PME;
292 /*
293 * Disable PME interrupt on this port in case it's been enabled
294 * by the BIOS (the PME service driver will enable it when
295 * necessary).
296 */
297 pcie_pme_interrupt_enable(dev, false);
298 }
257 299
258 return services; 300 return services;
259} 301}
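
Note: in get_port_device_capability() the hardware-reported capabilities are now filtered by cap_mask, which comes from the platform in auto mode and from a fixed default otherwise; a failed platform notification in auto mode yields no native services at all. A rough model of that gating, with the per-capability register checks omitted and simplified bit values.

#include <stdbool.h>
#include <stdio.h>

#define SRV_VC  0x01
#define SRV_HP  0x02
#define SRV_PME 0x04
#define SRV_AER 0x08

static int port_services(int hw_caps, bool ports_auto, bool platform_ok,
                         int platform_mask, bool aer_available)
{
        int cap_mask;

        if (ports_auto) {
                if (!platform_ok)
                        return 0;               /* _OSC failed: no native services */
                cap_mask = platform_mask;
        } else {
                cap_mask = SRV_VC | SRV_HP | SRV_PME;
                if (aer_available)
                        cap_mask |= SRV_AER;
        }
        return hw_caps & cap_mask;
}

int main(void)
{
        int hw = SRV_VC | SRV_HP | SRV_PME | SRV_AER;

        printf("auto, _OSC grants HP only: 0x%x\n",
               port_services(hw, true, true, SRV_VC | SRV_HP, true));
        printf("auto, _OSC failed:         0x%x\n",
               port_services(hw, true, false, 0, true));
        printf("forced native, AER off:    0x%x\n",
               port_services(hw, false, false, 0, false));
        return 0;
}
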
@@ -494,6 +536,9 @@ static void pcie_port_shutdown_service(struct device *dev) {}
494 */ 536 */
495int pcie_port_service_register(struct pcie_port_service_driver *new) 537int pcie_port_service_register(struct pcie_port_service_driver *new)
496{ 538{
539 if (pcie_ports_disabled)
540 return -ENODEV;
541
497 new->driver.name = (char *)new->name; 542 new->driver.name = (char *)new->name;
498 new->driver.bus = &pcie_port_bus_type; 543 new->driver.bus = &pcie_port_bus_type;
499 new->driver.probe = pcie_port_probe_service; 544 new->driver.probe = pcie_port_probe_service;
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 3debed25e46b..f9033e190fb6 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -15,6 +15,7 @@
15#include <linux/pcieport_if.h> 15#include <linux/pcieport_if.h>
16#include <linux/aer.h> 16#include <linux/aer.h>
17#include <linux/dmi.h> 17#include <linux/dmi.h>
18#include <linux/pci-aspm.h>
18 19
19#include "portdrv.h" 20#include "portdrv.h"
20#include "aer/aerdrv.h" 21#include "aer/aerdrv.h"
@@ -29,6 +30,31 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
29MODULE_DESCRIPTION(DRIVER_DESC); 30MODULE_DESCRIPTION(DRIVER_DESC);
30MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
31 32
33/* If this switch is set, PCIe port native services should not be enabled. */
34bool pcie_ports_disabled;
35
36/*
37 * If this switch is set, ACPI _OSC will be used to determine whether or not to
38 * enable PCIe port native services.
39 */
40bool pcie_ports_auto = true;
41
42static int __init pcie_port_setup(char *str)
43{
44 if (!strncmp(str, "compat", 6)) {
45 pcie_ports_disabled = true;
46 } else if (!strncmp(str, "native", 6)) {
47 pcie_ports_disabled = false;
48 pcie_ports_auto = false;
49 } else if (!strncmp(str, "auto", 4)) {
50 pcie_ports_disabled = false;
51 pcie_ports_auto = true;
52 }
53
54 return 1;
55}
56__setup("pcie_ports=", pcie_port_setup);
57
32/* global data */ 58/* global data */
33 59
34static int pcie_portdrv_restore_config(struct pci_dev *dev) 60static int pcie_portdrv_restore_config(struct pci_dev *dev)
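
Note: the new pcie_ports= parameter above selects one of three modes: compat disables the native port services, native enables them without consulting the platform, and auto (the default) leaves the decision to _OSC. A stand-alone check of how the keywords map to the two flags, copying the parsing logic shown in the hunk.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool ports_disabled;
static bool ports_auto = true;

static void parse(const char *str)
{
        if (!strncmp(str, "compat", 6)) {
                ports_disabled = true;
        } else if (!strncmp(str, "native", 6)) {
                ports_disabled = false;
                ports_auto = false;
        } else if (!strncmp(str, "auto", 4)) {
                ports_disabled = false;
                ports_auto = true;
        }
}

int main(void)
{
        const char *modes[] = { "compat", "native", "auto" };
        int i;

        for (i = 0; i < 3; i++) {
                parse(modes[i]);
                printf("pcie_ports=%s -> disabled=%d auto=%d\n",
                       modes[i], ports_disabled, ports_auto);
        }
        return 0;
}
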
@@ -301,6 +327,11 @@ static int __init pcie_portdrv_init(void)
301{ 327{
302 int retval; 328 int retval;
303 329
330 if (pcie_ports_disabled) {
331 pcie_no_aspm();
332 return -EACCES;
333 }
334
304 dmi_check_system(pcie_portdrv_dmi_table); 335 dmi_check_system(pcie_portdrv_dmi_table);
305 336
306 retval = pcie_port_bus_register(); 337 retval = pcie_port_bus_register();
@@ -315,11 +346,4 @@ static int __init pcie_portdrv_init(void)
315 return retval; 346 return retval;
316} 347}
317 348
318static void __exit pcie_portdrv_exit(void)
319{
320 pci_unregister_driver(&pcie_portdriver);
321 pcie_port_bus_unregister();
322}
323
324module_init(pcie_portdrv_init); 349module_init(pcie_portdrv_init);
325module_exit(pcie_portdrv_exit);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 89ed181cd90c..857ae01734a6 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -163,6 +163,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d
163DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); 163DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
164 164
165/* 165/*
166 * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
167 * for some HT machines to use C4 w/o hanging.
168 */
169static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
170{
171 u32 pmbase;
172 u16 pm1a;
173
174 pci_read_config_dword(dev, 0x40, &pmbase);
175 pmbase = pmbase & 0xff80;
176 pm1a = inw(pmbase);
177
178 if (pm1a & 0x10) {
179 dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
180 outw(0x10, pmbase);
181 }
182}
183DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
184
185/*
166 * Chipsets where PCI->PCI transfers vanish or hang 186 * Chipsets where PCI->PCI transfers vanish or hang
167 */ 187 */
168static void __devinit quirk_nopcipci(struct pci_dev *dev) 188static void __devinit quirk_nopcipci(struct pci_dev *dev)
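
Note: the TigerPoint quirk above writes the BM_STS bit back to PM1a_STS to clear it, since PM1 status bits are cleared by writing a 1, not a 0. A hedged illustration of the effect, with plain variables standing in for the inw()/outw() port accesses.

#include <stdio.h>

#define BM_STS 0x10

int main(void)
{
        unsigned short pm1a_sts = 0x0110;       /* pretend BM_STS is set */

        if (pm1a_sts & BM_STS) {
                pm1a_sts &= ~BM_STS;            /* models the effect of writing 1 back */
                puts("BM_STS cleared");
        }
        printf("PM1a_STS now 0x%04x\n", pm1a_sts);
        return 0;
}
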
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 659eaa0fc48f..968cfea04f74 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -49,7 +49,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
49} 49}
50 50
51/* these strings match up with the values in pci_bus_speed */ 51/* these strings match up with the values in pci_bus_speed */
52static char *pci_bus_speed_strings[] = { 52static const char *pci_bus_speed_strings[] = {
53 "33 MHz PCI", /* 0x00 */ 53 "33 MHz PCI", /* 0x00 */
54 "66 MHz PCI", /* 0x01 */ 54 "66 MHz PCI", /* 0x01 */
55 "66 MHz PCI-X", /* 0x02 */ 55 "66 MHz PCI-X", /* 0x02 */
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 54aa1c238cb3..9ba4dade69a4 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -163,7 +163,7 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev,
163 c = p_dev->function_config; 163 c = p_dev->function_config;
164 164
165 if (!(c->state & CONFIG_LOCKED)) { 165 if (!(c->state & CONFIG_LOCKED)) {
166 dev_dbg(&s->dev, "Configuration isnt't locked\n"); 166 dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
167 mutex_unlock(&s->ops_mutex); 167 mutex_unlock(&s->ops_mutex);
168 return -EACCES; 168 return -EACCES;
169 } 169 }
@@ -220,7 +220,7 @@ int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh,
220 s->win[w].card_start = offset; 220 s->win[w].card_start = offset;
221 ret = s->ops->set_mem_map(s, &s->win[w]); 221 ret = s->ops->set_mem_map(s, &s->win[w]);
222 if (ret) 222 if (ret)
223 dev_warn(&s->dev, "failed to set_mem_map\n"); 223 dev_warn(&p_dev->dev, "failed to set_mem_map\n");
224 mutex_unlock(&s->ops_mutex); 224 mutex_unlock(&s->ops_mutex);
225 return ret; 225 return ret;
226} /* pcmcia_map_mem_page */ 226} /* pcmcia_map_mem_page */
@@ -244,18 +244,18 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
244 c = p_dev->function_config; 244 c = p_dev->function_config;
245 245
246 if (!(s->state & SOCKET_PRESENT)) { 246 if (!(s->state & SOCKET_PRESENT)) {
247 dev_dbg(&s->dev, "No card present\n"); 247 dev_dbg(&p_dev->dev, "No card present\n");
248 ret = -ENODEV; 248 ret = -ENODEV;
249 goto unlock; 249 goto unlock;
250 } 250 }
251 if (!(c->state & CONFIG_LOCKED)) { 251 if (!(c->state & CONFIG_LOCKED)) {
252 dev_dbg(&s->dev, "Configuration isnt't locked\n"); 252 dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
253 ret = -EACCES; 253 ret = -EACCES;
254 goto unlock; 254 goto unlock;
255 } 255 }
256 256
257 if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) { 257 if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
258 dev_dbg(&s->dev, 258 dev_dbg(&p_dev->dev,
259 "changing Vcc or IRQ is not allowed at this time\n"); 259 "changing Vcc or IRQ is not allowed at this time\n");
260 ret = -EINVAL; 260 ret = -EINVAL;
261 goto unlock; 261 goto unlock;
@@ -265,20 +265,22 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
265 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && 265 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
266 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 266 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
267 if (mod->Vpp1 != mod->Vpp2) { 267 if (mod->Vpp1 != mod->Vpp2) {
268 dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n"); 268 dev_dbg(&p_dev->dev,
269 "Vpp1 and Vpp2 must be the same\n");
269 ret = -EINVAL; 270 ret = -EINVAL;
270 goto unlock; 271 goto unlock;
271 } 272 }
272 s->socket.Vpp = mod->Vpp1; 273 s->socket.Vpp = mod->Vpp1;
273 if (s->ops->set_socket(s, &s->socket)) { 274 if (s->ops->set_socket(s, &s->socket)) {
274 dev_printk(KERN_WARNING, &s->dev, 275 dev_printk(KERN_WARNING, &p_dev->dev,
275 "Unable to set VPP\n"); 276 "Unable to set VPP\n");
276 ret = -EIO; 277 ret = -EIO;
277 goto unlock; 278 goto unlock;
278 } 279 }
279 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || 280 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
280 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 281 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
281 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n"); 282 dev_dbg(&p_dev->dev,
283 "changing Vcc is not allowed at this time\n");
282 ret = -EINVAL; 284 ret = -EINVAL;
283 goto unlock; 285 goto unlock;
284 } 286 }
@@ -401,7 +403,7 @@ int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res)
401 win = &s->win[w]; 403 win = &s->win[w];
402 404
403 if (!(p_dev->_win & CLIENT_WIN_REQ(w))) { 405 if (!(p_dev->_win & CLIENT_WIN_REQ(w))) {
404 dev_dbg(&s->dev, "not releasing unknown window\n"); 406 dev_dbg(&p_dev->dev, "not releasing unknown window\n");
405 mutex_unlock(&s->ops_mutex); 407 mutex_unlock(&s->ops_mutex);
406 return -EINVAL; 408 return -EINVAL;
407 } 409 }
@@ -439,7 +441,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
439 return -ENODEV; 441 return -ENODEV;
440 442
441 if (req->IntType & INT_CARDBUS) { 443 if (req->IntType & INT_CARDBUS) {
442 dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n"); 444 dev_dbg(&p_dev->dev, "IntType may not be INT_CARDBUS\n");
443 return -EINVAL; 445 return -EINVAL;
444 } 446 }
445 447
@@ -447,7 +449,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
447 c = p_dev->function_config; 449 c = p_dev->function_config;
448 if (c->state & CONFIG_LOCKED) { 450 if (c->state & CONFIG_LOCKED) {
449 mutex_unlock(&s->ops_mutex); 451 mutex_unlock(&s->ops_mutex);
450 dev_dbg(&s->dev, "Configuration is locked\n"); 452 dev_dbg(&p_dev->dev, "Configuration is locked\n");
451 return -EACCES; 453 return -EACCES;
452 } 454 }
453 455
@@ -455,7 +457,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
455 s->socket.Vpp = req->Vpp; 457 s->socket.Vpp = req->Vpp;
456 if (s->ops->set_socket(s, &s->socket)) { 458 if (s->ops->set_socket(s, &s->socket)) {
457 mutex_unlock(&s->ops_mutex); 459 mutex_unlock(&s->ops_mutex);
458 dev_printk(KERN_WARNING, &s->dev, 460 dev_printk(KERN_WARNING, &p_dev->dev,
459 "Unable to set socket state\n"); 461 "Unable to set socket state\n");
460 return -EINVAL; 462 return -EINVAL;
461 } 463 }
@@ -569,19 +571,20 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
569 int ret = -EINVAL; 571 int ret = -EINVAL;
570 572
571 mutex_lock(&s->ops_mutex); 573 mutex_lock(&s->ops_mutex);
572 dev_dbg(&s->dev, "pcmcia_request_io: %pR , %pR", &c->io[0], &c->io[1]); 574 dev_dbg(&p_dev->dev, "pcmcia_request_io: %pR , %pR",
575 &c->io[0], &c->io[1]);
573 576
574 if (!(s->state & SOCKET_PRESENT)) { 577 if (!(s->state & SOCKET_PRESENT)) {
575 dev_dbg(&s->dev, "pcmcia_request_io: No card present\n"); 578 dev_dbg(&p_dev->dev, "pcmcia_request_io: No card present\n");
576 goto out; 579 goto out;
577 } 580 }
578 581
579 if (c->state & CONFIG_LOCKED) { 582 if (c->state & CONFIG_LOCKED) {
580 dev_dbg(&s->dev, "Configuration is locked\n"); 583 dev_dbg(&p_dev->dev, "Configuration is locked\n");
581 goto out; 584 goto out;
582 } 585 }
583 if (c->state & CONFIG_IO_REQ) { 586 if (c->state & CONFIG_IO_REQ) {
584 dev_dbg(&s->dev, "IO already configured\n"); 587 dev_dbg(&p_dev->dev, "IO already configured\n");
585 goto out; 588 goto out;
586 } 589 }
587 590
@@ -592,7 +595,13 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
592 if (c->io[1].end) { 595 if (c->io[1].end) {
593 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines); 596 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines);
594 if (ret) { 597 if (ret) {
598 struct resource tmp = c->io[0];
599 /* release the previously allocated resource */
595 release_io_space(s, &c->io[0]); 600 release_io_space(s, &c->io[0]);
601 /* but preserve the settings, for they worked... */
602 c->io[0].end = resource_size(&tmp);
603 c->io[0].start = tmp.start;
604 c->io[0].flags = tmp.flags;
596 goto out; 605 goto out;
597 } 606 }
598 } else 607 } else
@@ -601,7 +610,7 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
601 c->state |= CONFIG_IO_REQ; 610 c->state |= CONFIG_IO_REQ;
602 p_dev->_io = 1; 611 p_dev->_io = 1;
603 612
604 dev_dbg(&s->dev, "pcmcia_request_io succeeded: %pR , %pR", 613 dev_dbg(&p_dev->dev, "pcmcia_request_io succeeded: %pR , %pR",
605 &c->io[0], &c->io[1]); 614 &c->io[0], &c->io[1]);
606out: 615out:
607 mutex_unlock(&s->ops_mutex); 616 mutex_unlock(&s->ops_mutex);
@@ -800,7 +809,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
800 int w; 809 int w;
801 810
802 if (!(s->state & SOCKET_PRESENT)) { 811 if (!(s->state & SOCKET_PRESENT)) {
803 dev_dbg(&s->dev, "No card present\n"); 812 dev_dbg(&p_dev->dev, "No card present\n");
804 return -ENODEV; 813 return -ENODEV;
805 } 814 }
806 815
@@ -809,12 +818,12 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
809 req->Size = s->map_size; 818 req->Size = s->map_size;
810 align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size; 819 align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size;
811 if (req->Size & (s->map_size-1)) { 820 if (req->Size & (s->map_size-1)) {
812 dev_dbg(&s->dev, "invalid map size\n"); 821 dev_dbg(&p_dev->dev, "invalid map size\n");
813 return -EINVAL; 822 return -EINVAL;
814 } 823 }
815 if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) || 824 if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) ||
816 (req->Base & (align-1))) { 825 (req->Base & (align-1))) {
817 dev_dbg(&s->dev, "invalid base address\n"); 826 dev_dbg(&p_dev->dev, "invalid base address\n");
818 return -EINVAL; 827 return -EINVAL;
819 } 828 }
820 if (req->Base) 829 if (req->Base)
@@ -826,7 +835,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
826 if (!(s->state & SOCKET_WIN_REQ(w))) 835 if (!(s->state & SOCKET_WIN_REQ(w)))
827 break; 836 break;
828 if (w == MAX_WIN) { 837 if (w == MAX_WIN) {
829 dev_dbg(&s->dev, "all windows are used already\n"); 838 dev_dbg(&p_dev->dev, "all windows are used already\n");
830 mutex_unlock(&s->ops_mutex); 839 mutex_unlock(&s->ops_mutex);
831 return -EINVAL; 840 return -EINVAL;
832 } 841 }
@@ -837,7 +846,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
837 win->res = pcmcia_find_mem_region(req->Base, req->Size, align, 846 win->res = pcmcia_find_mem_region(req->Base, req->Size, align,
838 0, s); 847 0, s);
839 if (!win->res) { 848 if (!win->res) {
840 dev_dbg(&s->dev, "allocating mem region failed\n"); 849 dev_dbg(&p_dev->dev, "allocating mem region failed\n");
841 mutex_unlock(&s->ops_mutex); 850 mutex_unlock(&s->ops_mutex);
842 return -EINVAL; 851 return -EINVAL;
843 } 852 }
@@ -851,7 +860,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
851 win->card_start = 0; 860 win->card_start = 0;
852 861
853 if (s->ops->set_mem_map(s, win) != 0) { 862 if (s->ops->set_mem_map(s, win) != 0) {
854 dev_dbg(&s->dev, "failed to set memory mapping\n"); 863 dev_dbg(&p_dev->dev, "failed to set memory mapping\n");
855 mutex_unlock(&s->ops_mutex); 864 mutex_unlock(&s->ops_mutex);
856 return -EIO; 865 return -EIO;
857 } 866 }
@@ -874,7 +883,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
874 if (win->res) 883 if (win->res)
875 request_resource(&iomem_resource, res); 884 request_resource(&iomem_resource, res);
876 885
877 dev_dbg(&s->dev, "request_window results in %pR\n", res); 886 dev_dbg(&p_dev->dev, "request_window results in %pR\n", res);
878 887
879 mutex_unlock(&s->ops_mutex); 888 mutex_unlock(&s->ops_mutex);
880 *wh = res; 889 *wh = res;
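The pcmcia_resource.c hunk above releases the first I/O window when the second one cannot be allocated, but copies its start, length and flags back afterwards ("preserve the settings, for they worked"), presumably so a later attempt can reuse the same request. A minimal stand-alone sketch of that save-release-restore pattern; res, res_size(), release_window() and undo_first_window() are toy stand-ins, not the driver's own helpers:

    #include <string.h>

    /* toy stand-ins for struct resource and resource_size() */
    struct res { unsigned long start, end, flags; };

    static unsigned long res_size(const struct res *r)
    {
            return r->end - r->start + 1;
    }

    /* hypothetical helper standing in for release_io_space(): wipes the window */
    static void release_window(struct res *r)
    {
            memset(r, 0, sizeof(*r));
    }

    /* called when the second window could not be allocated */
    static void undo_first_window(struct res *io0)
    {
            struct res tmp = *io0;          /* remember the settings that worked */

            release_window(io0);            /* give the window back ...          */
            io0->end = res_size(&tmp);      /* ... but keep the length in .end,  */
            io0->start = tmp.start;         /* plus the original start and flags */
            io0->flags = tmp.flags;
    }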
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index b8a869af0f44..deef6656ab7b 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -646,7 +646,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
646 if (!pci_resource_start(dev, 0)) { 646 if (!pci_resource_start(dev, 0)) {
647 dev_warn(&dev->dev, "refusing to load the driver as the " 647 dev_warn(&dev->dev, "refusing to load the driver as the "
648 "io_base is NULL.\n"); 648 "io_base is NULL.\n");
649 goto err_out_free_mem; 649 goto err_out_disable;
650 } 650 }
651 651
652 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx " 652 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
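The pd6729 one-liner retargets an error goto so the BAR-check failure path unwinds everything done so far — presumably pci_enable_device() has already succeeded at that point, so bailing out must pass through the label that disables the device rather than jumping straight to freeing memory. The usual stacked-label idiom, sketched with hypothetical names (example_probe, err_disable, err_free):

    #include <linux/pci.h>
    #include <linux/slab.h>

    static int example_probe(struct pci_dev *dev)
    {
            struct example *priv;
            int ret;

            priv = kzalloc(sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            ret = pci_enable_device(dev);
            if (ret)
                    goto err_free;

            if (!pci_resource_start(dev, 0)) {
                    ret = -ENODEV;
                    goto err_disable;       /* must undo pci_enable_device() too */
            }

            return 0;

    err_disable:
            pci_disable_device(dev);
    err_free:
            kfree(priv);
            return ret;
    }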
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 044f430f3b43..cff7cc2c1f02 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -486,10 +486,12 @@ config TOPSTAR_LAPTOP
486config ACPI_TOSHIBA 486config ACPI_TOSHIBA
487 tristate "Toshiba Laptop Extras" 487 tristate "Toshiba Laptop Extras"
488 depends on ACPI 488 depends on ACPI
489 depends on LEDS_CLASS
490 depends on NEW_LEDS
491 depends on BACKLIGHT_CLASS_DEVICE
489 depends on INPUT 492 depends on INPUT
490 depends on RFKILL || RFKILL = n 493 depends on RFKILL || RFKILL = n
491 select INPUT_POLLDEV 494 select INPUT_POLLDEV
492 select BACKLIGHT_CLASS_DEVICE
493 ---help--- 495 ---help---
494 This driver adds support for access to certain system settings 496 This driver adds support for access to certain system settings
495 on "legacy free" Toshiba laptops. These laptops can be recognized by 497 on "legacy free" Toshiba laptops. These laptops can be recognized by
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index f15516374987..c1741142a4cb 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -79,12 +79,13 @@ struct bios_args {
79 u32 command; 79 u32 command;
80 u32 commandtype; 80 u32 commandtype;
81 u32 datasize; 81 u32 datasize;
82 char *data; 82 u32 data;
83}; 83};
84 84
85struct bios_return { 85struct bios_return {
86 u32 sigpass; 86 u32 sigpass;
87 u32 return_code; 87 u32 return_code;
88 u32 value;
88}; 89};
89 90
90struct key_entry { 91struct key_entry {
@@ -148,7 +149,7 @@ static struct platform_driver hp_wmi_driver = {
148 * buffer = kzalloc(128, GFP_KERNEL); 149 * buffer = kzalloc(128, GFP_KERNEL);
149 * ret = hp_wmi_perform_query(0x7, 0, buffer, 128) 150 * ret = hp_wmi_perform_query(0x7, 0, buffer, 128)
150 */ 151 */
151static int hp_wmi_perform_query(int query, int write, char *buffer, 152static int hp_wmi_perform_query(int query, int write, u32 *buffer,
152 int buffersize) 153 int buffersize)
153{ 154{
154 struct bios_return bios_return; 155 struct bios_return bios_return;
@@ -159,7 +160,7 @@ static int hp_wmi_perform_query(int query, int write, char *buffer,
159 .command = write ? 0x2 : 0x1, 160 .command = write ? 0x2 : 0x1,
160 .commandtype = query, 161 .commandtype = query,
161 .datasize = buffersize, 162 .datasize = buffersize,
162 .data = buffer, 163 .data = *buffer,
163 }; 164 };
164 struct acpi_buffer input = { sizeof(struct bios_args), &args }; 165 struct acpi_buffer input = { sizeof(struct bios_args), &args };
165 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 166 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -177,29 +178,14 @@ static int hp_wmi_perform_query(int query, int write, char *buffer,
177 178
178 bios_return = *((struct bios_return *)obj->buffer.pointer); 179 bios_return = *((struct bios_return *)obj->buffer.pointer);
179 180
180 if (bios_return.return_code) { 181 memcpy(buffer, &bios_return.value, sizeof(bios_return.value));
181 printk(KERN_WARNING PREFIX "Query %d returned %d\n", query,
182 bios_return.return_code);
183 kfree(obj);
184 return bios_return.return_code;
185 }
186 if (obj->buffer.length - sizeof(bios_return) > buffersize) {
187 kfree(obj);
188 return -EINVAL;
189 }
190
191 memset(buffer, 0, buffersize);
192 memcpy(buffer,
193 ((char *)obj->buffer.pointer) + sizeof(struct bios_return),
194 obj->buffer.length - sizeof(bios_return));
195 kfree(obj);
196 return 0; 182 return 0;
197} 183}
198 184
199static int hp_wmi_display_state(void) 185static int hp_wmi_display_state(void)
200{ 186{
201 int state; 187 int state = 0;
202 int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, (char *)&state, 188 int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
203 sizeof(state)); 189 sizeof(state));
204 if (ret) 190 if (ret)
205 return -EINVAL; 191 return -EINVAL;
@@ -208,8 +194,8 @@ static int hp_wmi_display_state(void)
208 194
209static int hp_wmi_hddtemp_state(void) 195static int hp_wmi_hddtemp_state(void)
210{ 196{
211 int state; 197 int state = 0;
212 int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, (char *)&state, 198 int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
213 sizeof(state)); 199 sizeof(state));
214 if (ret) 200 if (ret)
215 return -EINVAL; 201 return -EINVAL;
@@ -218,8 +204,8 @@ static int hp_wmi_hddtemp_state(void)
218 204
219static int hp_wmi_als_state(void) 205static int hp_wmi_als_state(void)
220{ 206{
221 int state; 207 int state = 0;
222 int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, (char *)&state, 208 int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
223 sizeof(state)); 209 sizeof(state));
224 if (ret) 210 if (ret)
225 return -EINVAL; 211 return -EINVAL;
@@ -228,8 +214,8 @@ static int hp_wmi_als_state(void)
228 214
229static int hp_wmi_dock_state(void) 215static int hp_wmi_dock_state(void)
230{ 216{
231 int state; 217 int state = 0;
232 int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state, 218 int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
233 sizeof(state)); 219 sizeof(state));
234 220
235 if (ret) 221 if (ret)
@@ -240,8 +226,8 @@ static int hp_wmi_dock_state(void)
240 226
241static int hp_wmi_tablet_state(void) 227static int hp_wmi_tablet_state(void)
242{ 228{
243 int state; 229 int state = 0;
244 int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state, 230 int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
245 sizeof(state)); 231 sizeof(state));
246 if (ret) 232 if (ret)
247 return ret; 233 return ret;
@@ -256,7 +242,7 @@ static int hp_wmi_set_block(void *data, bool blocked)
256 int ret; 242 int ret;
257 243
258 ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 244 ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
259 (char *)&query, sizeof(query)); 245 &query, sizeof(query));
260 if (ret) 246 if (ret)
261 return -EINVAL; 247 return -EINVAL;
262 return 0; 248 return 0;
@@ -268,10 +254,10 @@ static const struct rfkill_ops hp_wmi_rfkill_ops = {
268 254
269static bool hp_wmi_get_sw_state(enum hp_wmi_radio r) 255static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
270{ 256{
271 int wireless; 257 int wireless = 0;
272 int mask; 258 int mask;
273 hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 259 hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
274 (char *)&wireless, sizeof(wireless)); 260 &wireless, sizeof(wireless));
275 /* TBD: Pass error */ 261 /* TBD: Pass error */
276 262
277 mask = 0x200 << (r * 8); 263 mask = 0x200 << (r * 8);
@@ -284,10 +270,10 @@ static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
284 270
285static bool hp_wmi_get_hw_state(enum hp_wmi_radio r) 271static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
286{ 272{
287 int wireless; 273 int wireless = 0;
288 int mask; 274 int mask;
289 hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 275 hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
290 (char *)&wireless, sizeof(wireless)); 276 &wireless, sizeof(wireless));
291 /* TBD: Pass error */ 277 /* TBD: Pass error */
292 278
293 mask = 0x800 << (r * 8); 279 mask = 0x800 << (r * 8);
@@ -347,7 +333,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr,
347 const char *buf, size_t count) 333 const char *buf, size_t count)
348{ 334{
349 u32 tmp = simple_strtoul(buf, NULL, 10); 335 u32 tmp = simple_strtoul(buf, NULL, 10);
350 int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, (char *)&tmp, 336 int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
351 sizeof(tmp)); 337 sizeof(tmp));
352 if (ret) 338 if (ret)
353 return -EINVAL; 339 return -EINVAL;
@@ -421,7 +407,7 @@ static void hp_wmi_notify(u32 value, void *context)
421 static struct key_entry *key; 407 static struct key_entry *key;
422 union acpi_object *obj; 408 union acpi_object *obj;
423 u32 event_id, event_data; 409 u32 event_id, event_data;
424 int key_code, ret; 410 int key_code = 0, ret;
425 u32 *location; 411 u32 *location;
426 acpi_status status; 412 acpi_status status;
427 413
@@ -475,7 +461,7 @@ static void hp_wmi_notify(u32 value, void *context)
475 break; 461 break;
476 case HPWMI_BEZEL_BUTTON: 462 case HPWMI_BEZEL_BUTTON:
477 ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, 463 ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
478 (char *)&key_code, 464 &key_code,
479 sizeof(key_code)); 465 sizeof(key_code));
480 if (ret) 466 if (ret)
481 break; 467 break;
@@ -578,9 +564,9 @@ static void cleanup_sysfs(struct platform_device *device)
578static int __devinit hp_wmi_bios_setup(struct platform_device *device) 564static int __devinit hp_wmi_bios_setup(struct platform_device *device)
579{ 565{
580 int err; 566 int err;
581 int wireless; 567 int wireless = 0;
582 568
583 err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, (char *)&wireless, 569 err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless,
584 sizeof(wireless)); 570 sizeof(wireless));
585 if (err) 571 if (err)
586 return err; 572 return err;
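The hp-wmi changes above replace the variable-length buffer interface with a fixed 32-bit in/out word: bios_args now carries the value itself instead of a kernel pointer, bios_return gains a value field that is copied back to the caller, and every caller initialises its state word before the query so a failed call leaves a defined value behind. A condensed sketch of the resulting layout and call pattern; the WMI plumbing and the struct fields not shown in the hunk are omitted, and query_sketch()/example_dock_state_sketch() are illustrative names only:

    #include <linux/types.h>
    #include <linux/string.h>

    struct bios_args_sketch {
            /* earlier fields omitted */
            u32 command;
            u32 commandtype;
            u32 datasize;
            u32 data;               /* value passed by value, no pointer */
    };

    struct bios_return_sketch {
            u32 sigpass;
            u32 return_code;
            u32 value;              /* value handed back by the firmware */
    };

    /* copy the 32-bit reply out; error handling elided as in the patch */
    static int query_sketch(u32 *buffer, const struct bios_return_sketch *reply)
    {
            memcpy(buffer, &reply->value, sizeof(reply->value));
            return 0;
    }

    /* caller pattern after the change */
    static int example_dock_state_sketch(const struct bios_return_sketch *reply)
    {
            u32 state = 0;          /* pre-initialised: survives a failed query */

            query_sketch(&state, reply);
            return (int)state;
    }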
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 9024480a8228..c44a5e8b8b82 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -51,7 +51,6 @@
51 * TODO: 51 * TODO:
52 * - handle CPU hotplug 52 * - handle CPU hotplug
53 * - provide turbo enable/disable api 53 * - provide turbo enable/disable api
54 * - make sure we can write turbo enable/disable reg based on MISC_EN
55 * 54 *
56 * Related documents: 55 * Related documents:
57 * - CDI 403777, 403778 - Auburndale EDS vol 1 & 2 56 * - CDI 403777, 403778 - Auburndale EDS vol 1 & 2
@@ -230,7 +229,7 @@
230#define THM_TC2 0xac 229#define THM_TC2 0xac
231#define THM_DTV 0xb0 230#define THM_DTV 0xb0
232#define THM_ITV 0xd8 231#define THM_ITV 0xd8
233#define ITV_ME_SEQNO_MASK 0x000f0000 /* ME should update every ~200ms */ 232#define ITV_ME_SEQNO_MASK 0x00ff0000 /* ME should update every ~200ms */
234#define ITV_ME_SEQNO_SHIFT (16) 233#define ITV_ME_SEQNO_SHIFT (16)
235#define ITV_MCH_TEMP_MASK 0x0000ff00 234#define ITV_MCH_TEMP_MASK 0x0000ff00
236#define ITV_MCH_TEMP_SHIFT (8) 235#define ITV_MCH_TEMP_SHIFT (8)
@@ -325,6 +324,7 @@ struct ips_driver {
325 bool gpu_preferred; 324 bool gpu_preferred;
326 bool poll_turbo_status; 325 bool poll_turbo_status;
327 bool second_cpu; 326 bool second_cpu;
327 bool turbo_toggle_allowed;
328 struct ips_mcp_limits *limits; 328 struct ips_mcp_limits *limits;
329 329
330 /* Optional MCH interfaces for if i915 is in use */ 330 /* Optional MCH interfaces for if i915 is in use */
@@ -415,7 +415,7 @@ static void ips_cpu_lower(struct ips_driver *ips)
415 new_limit = cur_limit - 8; /* 1W decrease */ 415 new_limit = cur_limit - 8; /* 1W decrease */
416 416
417 /* Clamp to SKU TDP limit */ 417 /* Clamp to SKU TDP limit */
418 if (((new_limit * 10) / 8) < (ips->orig_turbo_limit & TURBO_TDP_MASK)) 418 if (new_limit < (ips->orig_turbo_limit & TURBO_TDP_MASK))
419 new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK; 419 new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK;
420 420
421 thm_writew(THM_MPCPC, (new_limit * 10) / 8); 421 thm_writew(THM_MPCPC, (new_limit * 10) / 8);
@@ -461,7 +461,8 @@ static void ips_enable_cpu_turbo(struct ips_driver *ips)
461 if (ips->__cpu_turbo_on) 461 if (ips->__cpu_turbo_on)
462 return; 462 return;
463 463
464 on_each_cpu(do_enable_cpu_turbo, ips, 1); 464 if (ips->turbo_toggle_allowed)
465 on_each_cpu(do_enable_cpu_turbo, ips, 1);
465 466
466 ips->__cpu_turbo_on = true; 467 ips->__cpu_turbo_on = true;
467} 468}
@@ -498,7 +499,8 @@ static void ips_disable_cpu_turbo(struct ips_driver *ips)
498 if (!ips->__cpu_turbo_on) 499 if (!ips->__cpu_turbo_on)
499 return; 500 return;
500 501
501 on_each_cpu(do_disable_cpu_turbo, ips, 1); 502 if (ips->turbo_toggle_allowed)
503 on_each_cpu(do_disable_cpu_turbo, ips, 1);
502 504
503 ips->__cpu_turbo_on = false; 505 ips->__cpu_turbo_on = false;
504} 506}
@@ -598,17 +600,29 @@ static bool mcp_exceeded(struct ips_driver *ips)
598{ 600{
599 unsigned long flags; 601 unsigned long flags;
600 bool ret = false; 602 bool ret = false;
603 u32 temp_limit;
604 u32 avg_power;
605 const char *msg = "MCP limit exceeded: ";
601 606
602 spin_lock_irqsave(&ips->turbo_status_lock, flags); 607 spin_lock_irqsave(&ips->turbo_status_lock, flags);
603 if (ips->mcp_avg_temp > (ips->mcp_temp_limit * 100)) 608
604 ret = true; 609 temp_limit = ips->mcp_temp_limit * 100;
605 if (ips->cpu_avg_power + ips->mch_avg_power > ips->mcp_power_limit) 610 if (ips->mcp_avg_temp > temp_limit) {
611 dev_info(&ips->dev->dev,
612 "%sAvg temp %u, limit %u\n", msg, ips->mcp_avg_temp,
613 temp_limit);
606 ret = true; 614 ret = true;
607 spin_unlock_irqrestore(&ips->turbo_status_lock, flags); 615 }
608 616
609 if (ret) 617 avg_power = ips->cpu_avg_power + ips->mch_avg_power;
618 if (avg_power > ips->mcp_power_limit) {
610 dev_info(&ips->dev->dev, 619 dev_info(&ips->dev->dev,
611 "MCP power or thermal limit exceeded\n"); 620 "%sAvg power %u, limit %u\n", msg, avg_power,
621 ips->mcp_power_limit);
622 ret = true;
623 }
624
625 spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
612 626
613 return ret; 627 return ret;
614} 628}
@@ -663,6 +677,27 @@ static bool mch_exceeded(struct ips_driver *ips)
663} 677}
664 678
665/** 679/**
680 * verify_limits - verify BIOS provided limits
681 * @ips: IPS structure
682 *
683 * BIOS can optionally provide non-default limits for power and temp. Check
684 * them here and use the defaults if the BIOS values are not provided or
685 * are otherwise unusable.
686 */
687static void verify_limits(struct ips_driver *ips)
688{
689 if (ips->mcp_power_limit < ips->limits->mcp_power_limit ||
690 ips->mcp_power_limit > 35000)
691 ips->mcp_power_limit = ips->limits->mcp_power_limit;
692
693 if (ips->mcp_temp_limit < ips->limits->core_temp_limit ||
694 ips->mcp_temp_limit < ips->limits->mch_temp_limit ||
695 ips->mcp_temp_limit > 150)
696 ips->mcp_temp_limit = min(ips->limits->core_temp_limit,
697 ips->limits->mch_temp_limit);
698}
699
700/**
666 * update_turbo_limits - get various limits & settings from regs 701 * update_turbo_limits - get various limits & settings from regs
667 * @ips: IPS driver struct 702 * @ips: IPS driver struct
668 * 703 *
@@ -680,12 +715,21 @@ static void update_turbo_limits(struct ips_driver *ips)
680 u32 hts = thm_readl(THM_HTS); 715 u32 hts = thm_readl(THM_HTS);
681 716
682 ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS); 717 ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS);
683 ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS); 718 /*
719 * Disable turbo for now, until we can figure out why the power figures
720 * are wrong
721 */
722 ips->cpu_turbo_enabled = false;
723
724 if (ips->gpu_busy)
725 ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS);
726
684 ips->core_power_limit = thm_readw(THM_MPCPC); 727 ips->core_power_limit = thm_readw(THM_MPCPC);
685 ips->mch_power_limit = thm_readw(THM_MMGPC); 728 ips->mch_power_limit = thm_readw(THM_MMGPC);
686 ips->mcp_temp_limit = thm_readw(THM_PTL); 729 ips->mcp_temp_limit = thm_readw(THM_PTL);
687 ips->mcp_power_limit = thm_readw(THM_MPPC); 730 ips->mcp_power_limit = thm_readw(THM_MPPC);
688 731
732 verify_limits(ips);
689 /* Ignore BIOS CPU vs GPU pref */ 733 /* Ignore BIOS CPU vs GPU pref */
690} 734}
691 735
@@ -858,7 +902,7 @@ static u32 get_cpu_power(struct ips_driver *ips, u32 *last, int period)
858 ret = (ret * 1000) / 65535; 902 ret = (ret * 1000) / 65535;
859 *last = val; 903 *last = val;
860 904
861 return ret; 905 return 0;
862} 906}
863 907
864static const u16 temp_decay_factor = 2; 908static const u16 temp_decay_factor = 2;
@@ -940,7 +984,6 @@ static int ips_monitor(void *data)
940 kfree(mch_samples); 984 kfree(mch_samples);
941 kfree(cpu_samples); 985 kfree(cpu_samples);
942 kfree(mchp_samples); 986 kfree(mchp_samples);
943 kthread_stop(ips->adjust);
944 return -ENOMEM; 987 return -ENOMEM;
945 } 988 }
946 989
@@ -948,7 +991,7 @@ static int ips_monitor(void *data)
948 ITV_ME_SEQNO_SHIFT; 991 ITV_ME_SEQNO_SHIFT;
949 seqno_timestamp = get_jiffies_64(); 992 seqno_timestamp = get_jiffies_64();
950 993
951 old_cpu_power = thm_readl(THM_CEC) / 65535; 994 old_cpu_power = thm_readl(THM_CEC);
952 schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); 995 schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
953 996
954 /* Collect an initial average */ 997 /* Collect an initial average */
@@ -1150,11 +1193,18 @@ static irqreturn_t ips_irq_handler(int irq, void *arg)
1150 STS_GPL_SHIFT; 1193 STS_GPL_SHIFT;
1151 /* ignore EC CPU vs GPU pref */ 1194 /* ignore EC CPU vs GPU pref */
1152 ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS); 1195 ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS);
1153 ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS); 1196 /*
1197 * Disable turbo for now, until we can figure
1198 * out why the power figures are wrong
1199 */
1200 ips->cpu_turbo_enabled = false;
1201 if (ips->gpu_busy)
1202 ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS);
1154 ips->mcp_temp_limit = (sts & STS_PTL_MASK) >> 1203 ips->mcp_temp_limit = (sts & STS_PTL_MASK) >>
1155 STS_PTL_SHIFT; 1204 STS_PTL_SHIFT;
1156 ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >> 1205 ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >>
1157 STS_PPL_SHIFT; 1206 STS_PPL_SHIFT;
1207 verify_limits(ips);
1158 spin_unlock(&ips->turbo_status_lock); 1208 spin_unlock(&ips->turbo_status_lock);
1159 1209
1160 thm_writeb(THM_SEC, SEC_ACK); 1210 thm_writeb(THM_SEC, SEC_ACK);
@@ -1333,8 +1383,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
1333 * turbo manually or we'll get an illegal MSR access, even though 1383 * turbo manually or we'll get an illegal MSR access, even though
1334 * turbo will still be available. 1384 * turbo will still be available.
1335 */ 1385 */
1336 if (!(misc_en & IA32_MISC_TURBO_EN)) 1386 if (misc_en & IA32_MISC_TURBO_EN)
1337 ; /* add turbo MSR write allowed flag if necessary */ 1387 ips->turbo_toggle_allowed = true;
1388 else
1389 ips->turbo_toggle_allowed = false;
1338 1390
1339 if (strstr(boot_cpu_data.x86_model_id, "CPU M")) 1391 if (strstr(boot_cpu_data.x86_model_id, "CPU M"))
1340 limits = &ips_sv_limits; 1392 limits = &ips_sv_limits;
@@ -1351,9 +1403,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
1351 tdp = turbo_power & TURBO_TDP_MASK; 1403 tdp = turbo_power & TURBO_TDP_MASK;
1352 1404
1353 /* Sanity check TDP against CPU */ 1405 /* Sanity check TDP against CPU */
1354 if (limits->mcp_power_limit != (tdp / 8) * 1000) { 1406 if (limits->core_power_limit != (tdp / 8) * 1000) {
1355 dev_warn(&ips->dev->dev, "Warning: CPU TDP doesn't match expected value (found %d, expected %d)\n", 1407 dev_info(&ips->dev->dev, "CPU TDP doesn't match expected value (found %d, expected %d)\n",
1356 tdp / 8, limits->mcp_power_limit / 1000); 1408 tdp / 8, limits->core_power_limit / 1000);
1409 limits->core_power_limit = (tdp / 8) * 1000;
1357 } 1410 }
1358 1411
1359out: 1412out:
@@ -1390,7 +1443,7 @@ static bool ips_get_i915_syms(struct ips_driver *ips)
1390 return true; 1443 return true;
1391 1444
1392out_put_busy: 1445out_put_busy:
1393 symbol_put(i915_gpu_turbo_disable); 1446 symbol_put(i915_gpu_busy);
1394out_put_lower: 1447out_put_lower:
1395 symbol_put(i915_gpu_lower); 1448 symbol_put(i915_gpu_lower);
1396out_put_raise: 1449out_put_raise:
@@ -1532,22 +1585,27 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
1532 /* Save turbo limits & ratios */ 1585 /* Save turbo limits & ratios */
1533 rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); 1586 rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
1534 1587
1535 ips_enable_cpu_turbo(ips); 1588 ips_disable_cpu_turbo(ips);
1536 ips->cpu_turbo_enabled = true; 1589 ips->cpu_turbo_enabled = false;
1537 1590
1538 /* Set up the work queue and monitor/adjust threads */ 1591 /* Create thermal adjust thread */
1539 ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor"); 1592 ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust");
1540 if (IS_ERR(ips->monitor)) { 1593 if (IS_ERR(ips->adjust)) {
1541 dev_err(&dev->dev, 1594 dev_err(&dev->dev,
1542 "failed to create thermal monitor thread, aborting\n"); 1595 "failed to create thermal adjust thread, aborting\n");
1543 ret = -ENOMEM; 1596 ret = -ENOMEM;
1544 goto error_free_irq; 1597 goto error_free_irq;
1598
1545 } 1599 }
1546 1600
1547 ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust"); 1601 /*
1548 if (IS_ERR(ips->adjust)) { 1602 * Set up the work queue and monitor thread. The monitor thread
1603 * will wake up ips_adjust thread.
1604 */
1605 ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor");
1606 if (IS_ERR(ips->monitor)) {
1549 dev_err(&dev->dev, 1607 dev_err(&dev->dev,
1550 "failed to create thermal adjust thread, aborting\n"); 1608 "failed to create thermal monitor thread, aborting\n");
1551 ret = -ENOMEM; 1609 ret = -ENOMEM;
1552 goto error_thread_cleanup; 1610 goto error_thread_cleanup;
1553 } 1611 }
@@ -1566,7 +1624,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
1566 return ret; 1624 return ret;
1567 1625
1568error_thread_cleanup: 1626error_thread_cleanup:
1569 kthread_stop(ips->monitor); 1627 kthread_stop(ips->adjust);
1570error_free_irq: 1628error_free_irq:
1571 free_irq(ips->dev->irq, ips); 1629 free_irq(ips->dev->irq, ips);
1572error_unmap: 1630error_unmap:
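Two threads run through the intel_ips changes above: a new turbo_toggle_allowed flag, set from IA32_MISC_ENABLE during CPU detection, now gates the per-CPU MSR writes in the enable/disable paths (the surrounding comment notes that writing the turbo MSR when it is locked triggers an illegal MSR access), and BIOS-supplied power/thermal limits are sanity-checked by verify_limits() before use. A compressed sketch of how the flag threads through, with the structure trimmed to the members involved and ips_sketch/ips_enable_cpu_turbo_sketch as illustrative names:

    #include <linux/smp.h>
    #include <linux/types.h>

    struct ips_sketch {
            bool __cpu_turbo_on;
            bool turbo_toggle_allowed;      /* set while reading MISC_ENABLE */
    };

    static void do_enable_cpu_turbo(void *info)
    {
            /* per-CPU MSR write elided */
    }

    static void ips_enable_cpu_turbo_sketch(struct ips_sketch *ips)
    {
            if (ips->__cpu_turbo_on)
                    return;

            /* only poke the MSRs when MISC_ENABLE said toggling is legal */
            if (ips->turbo_toggle_allowed)
                    on_each_cpu(do_enable_cpu_turbo, ips, 1);

            ips->__cpu_turbo_on = true;
    }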
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c
index 73f8e6d72669..2b11a33325e6 100644
--- a/drivers/platform/x86/intel_rar_register.c
+++ b/drivers/platform/x86/intel_rar_register.c
@@ -145,7 +145,7 @@ static void free_rar_device(struct rar_device *rar)
145 */ 145 */
146static struct rar_device *_rar_to_device(int rar, int *off) 146static struct rar_device *_rar_to_device(int rar, int *off)
147{ 147{
148 if (rar >= 0 && rar <= 3) { 148 if (rar >= 0 && rar < MRST_NUM_RAR) {
149 *off = rar; 149 *off = rar;
150 return &my_rar_device; 150 return &my_rar_device;
151 } 151 }
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 943f9084dcb1..6abe18e638e9 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -487,7 +487,7 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
487 mdelay(1); 487 mdelay(1);
488 *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR); 488 *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
489 } else if (cmd == IPC_I2C_WRITE) { 489 } else if (cmd == IPC_I2C_WRITE) {
490 writel(addr, ipcdev.i2c_base + I2C_DATA_ADDR); 490 writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR);
491 mdelay(1); 491 mdelay(1);
492 writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR); 492 writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
493 } else { 493 } else {
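The intel_scu_ipc fix above writes the caller's data word to the data register on the I2C write path; previously both registers received the address, so the payload never reached the device. The intended pairing, sketched with hypothetical register offsets (DATA_REG, CTRL_REG) standing in for I2C_DATA_ADDR and IPC_I2C_CNTRL_ADDR:

    #include <linux/io.h>
    #include <linux/delay.h>

    #define DATA_REG  0x00          /* hypothetical offsets */
    #define CTRL_REG  0x04

    static void i2c_cntrl_sketch(void __iomem *base, int is_write,
                                 u32 addr, u32 *data)
    {
            if (!is_write) {
                    writel(addr, base + CTRL_REG);  /* kick off the read  */
                    mdelay(1);
                    *data = readl(base + DATA_REG);
            } else {
                    writel(*data, base + DATA_REG); /* payload first ...  */
                    mdelay(1);
                    writel(addr, base + CTRL_REG);  /* ... then the command */
            }
    }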
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e35ed128bdef..2d61186ad5a2 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3093,7 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
3093 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ 3093 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
3094}; 3094};
3095 3095
3096typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; 3096typedef u16 tpacpi_keymap_entry_t;
3097typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
3097 3098
3098static int __init hotkey_init(struct ibm_init_struct *iibm) 3099static int __init hotkey_init(struct ibm_init_struct *iibm)
3099{ 3100{
@@ -3230,7 +3231,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3230 }; 3231 };
3231 3232
3232#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t) 3233#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t)
3233#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_t[0]) 3234#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t)
3234 3235
3235 int res, i; 3236 int res, i;
3236 int status; 3237 int status;
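The thinkpad_acpi change splits the keymap typedef so the per-entry size can be taken from a real element type; with only the array typedef available, sizeof(tpacpi_keymap_t[0]) names an array-of-zero type (a GNU C extension with size 0 on typical builds) rather than "the first element". A stand-alone illustration with a hypothetical LEN:

    #include <stdio.h>

    #define LEN 64                                    /* hypothetical length */

    typedef unsigned short keymap_entry_t;            /* one hotkey scancode */
    typedef keymap_entry_t keymap_t[LEN];             /* the whole table     */

    int main(void)
    {
            /* whole table vs. single entry -- the distinction the patch wants */
            printf("table: %zu bytes\n", sizeof(keymap_t));       /* LEN * 2 */
            printf("entry: %zu bytes\n", sizeof(keymap_entry_t)); /* 2       */

            /* sizeof(keymap_t[0]) would name an array-of-zero *type*, not
             * element 0 of an object, which is why the old TYPESIZE macro
             * needed a separate element typedef. */
            return 0;
    }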
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index 936bae560fa1..dc628cb2e762 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -233,6 +233,7 @@ static int calculate_capacity(enum apm_source source)
233 empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN; 233 empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
234 now_prop = POWER_SUPPLY_PROP_ENERGY_NOW; 234 now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
235 avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG; 235 avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
236 break;
236 case SOURCE_VOLTAGE: 237 case SOURCE_VOLTAGE:
237 full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX; 238 full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
238 empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN; 239 empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
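The apm_power fix adds the break missing from the SOURCE_ENERGY arm, so the energy properties chosen there are no longer clobbered by falling through into the SOURCE_VOLTAGE arm. The hazard in miniature, with illustrative property names:

    #include <stdio.h>

    enum source { SOURCE_ENERGY, SOURCE_VOLTAGE };

    int main(void)
    {
            enum source source = SOURCE_ENERGY;
            const char *prop = "unset";

            switch (source) {
            case SOURCE_ENERGY:
                    prop = "ENERGY_NOW";
                    break;                  /* without this, control falls   */
            case SOURCE_VOLTAGE:            /* into the voltage arm and the  */
                    prop = "VOLTAGE_NOW";   /* selection above is overwritten */
                    break;
            }
            printf("%s\n", prop);           /* prints ENERGY_NOW */
            return 0;
    }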
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index c61ffec2ff10..2a10cd361181 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -185,8 +185,8 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
185{ 185{
186 u32 data[3]; 186 u32 data[3];
187 u8 *p = (u8 *)&data[1]; 187 u8 *p = (u8 *)&data[1];
188 int err = intel_scu_ipc_command(IPC_CMD_BATTERY_PROPERTY, 188 int err = intel_scu_ipc_command(IPCMSG_BATTERY,
189 IPCMSG_BATTERY, NULL, 0, data, 3); 189 IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3);
190 190
191 prop->capacity = data[0]; 191 prop->capacity = data[0];
192 prop->crnt = *p++; 192 prop->crnt = *p++;
@@ -207,7 +207,7 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
207 207
208static int pmic_scu_ipc_set_charger(int charger) 208static int pmic_scu_ipc_set_charger(int charger)
209{ 209{
210 return intel_scu_ipc_simple_command(charger, IPCMSG_BATTERY); 210 return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger);
211} 211}
212 212
213/** 213/**
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 7d149a8d8d9b..2ce2eb71d0f5 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -215,7 +215,7 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
215 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); 215 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
216 int ret = -EINVAL; 216 int ret = -EINVAL;
217 217
218 if (info->vol_table && (index < (2 << info->vol_nbits))) { 218 if (info->vol_table && (index < (1 << info->vol_nbits))) {
219 ret = info->vol_table[index]; 219 ret = info->vol_table[index];
220 if (info->slope_double) 220 if (info->slope_double)
221 ret <<= 1; 221 ret <<= 1;
@@ -233,7 +233,7 @@ static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
233 max_uV = max_uV >> 1; 233 max_uV = max_uV >> 1;
234 } 234 }
235 if (info->vol_table) { 235 if (info->vol_table) {
236 for (i = 0; i < (2 << info->vol_nbits); i++) { 236 for (i = 0; i < (1 << info->vol_nbits); i++) {
237 if (!info->vol_table[i]) 237 if (!info->vol_table[i])
238 break; 238 break;
239 if ((min_uV <= info->vol_table[i]) 239 if ((min_uV <= info->vol_table[i])
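The 88pm8607 fix replaces 2 << vol_nbits with 1 << vol_nbits: an n-bit selector field addresses 1 << n table entries, so the old bound walked twice as far as the table. With vol_nbits = 3 the valid indices are 0..7 (8 entries), while 2 << 3 = 16 would read past the end. In miniature, with hypothetical table values:

    #include <stdio.h>

    #define VOL_NBITS 3                     /* hypothetical field width */

    int main(void)
    {
            const int vol_table[1 << VOL_NBITS] = {
                    725, 750, 775, 800, 825, 850, 875, 900,  /* made-up mV */
            };
            int i;

            /* valid selector values for an n-bit field: 0 .. (1 << n) - 1 */
            for (i = 0; i < (1 << VOL_NBITS); i++)
                    printf("sel %d -> %d mV\n", i, vol_table[i]);

            /* (2 << VOL_NBITS) == 16 here and would index past the table */
            return 0;
    }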
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 11790990277a..b349266a43de 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -634,12 +634,9 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
634 "%s: failed to register regulator %s err %d\n", 634 "%s: failed to register regulator %s err %d\n",
635 __func__, ab3100_regulator_desc[i].name, 635 __func__, ab3100_regulator_desc[i].name,
636 err); 636 err);
637 i--;
638 /* remove the already registered regulators */ 637 /* remove the already registered regulators */
639 while (i > 0) { 638 while (--i >= 0)
640 regulator_unregister(ab3100_regulators[i].rdev); 639 regulator_unregister(ab3100_regulators[i].rdev);
641 i--;
642 }
643 return err; 640 return err;
644 } 641 }
645 642
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index dc3f1a491675..28c7ae67cec9 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -157,7 +157,7 @@ static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
157 if (info->fixed_uV) 157 if (info->fixed_uV)
158 return info->fixed_uV; 158 return info->fixed_uV;
159 159
160 if (selector > info->voltages_len) 160 if (selector >= info->voltages_len)
161 return -EINVAL; 161 return -EINVAL;
162 162
163 return info->supported_voltages[selector]; 163 return info->supported_voltages[selector];
@@ -344,13 +344,14 @@ static inline struct ab8500_regulator_info *find_regulator_info(int id)
344static __devinit int ab8500_regulator_probe(struct platform_device *pdev) 344static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
345{ 345{
346 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); 346 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
347 struct ab8500_platform_data *pdata = dev_get_platdata(ab8500->dev); 347 struct ab8500_platform_data *pdata;
348 int i, err; 348 int i, err;
349 349
350 if (!ab8500) { 350 if (!ab8500) {
351 dev_err(&pdev->dev, "null mfd parent\n"); 351 dev_err(&pdev->dev, "null mfd parent\n");
352 return -EINVAL; 352 return -EINVAL;
353 } 353 }
354 pdata = dev_get_platdata(ab8500->dev);
354 355
355 /* register all regulators */ 356 /* register all regulators */
356 for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { 357 for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
@@ -368,11 +369,9 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
368 dev_err(&pdev->dev, "failed to register regulator %s\n", 369 dev_err(&pdev->dev, "failed to register regulator %s\n",
369 info->desc.name); 370 info->desc.name);
370 /* when we fail, un-register all earlier regulators */ 371 /* when we fail, un-register all earlier regulators */
371 i--; 372 while (--i >= 0) {
372 while (i > 0) {
373 info = &ab8500_regulator_info[i]; 373 info = &ab8500_regulator_info[i];
374 regulator_unregister(info->regulator); 374 regulator_unregister(info->regulator);
375 i--;
376 } 375 }
377 return err; 376 return err;
378 } 377 }
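Both regulator probe fixes above (ab3100 and ab8500) converge on the same unwind idiom: on a failure at index i, while (--i >= 0) unregisters i-1 down to and including 0, whereas the old "i--; while (i > 0)" form stopped one short and leaked whatever had been registered at index 0. A stand-alone rendering of the fixed bound, with unregister() as a placeholder:

    #include <stdio.h>

    static void unregister(int idx)
    {
            printf("unregister %d\n", idx);
    }

    int main(void)
    {
            int failed_at = 3;              /* registering entry 3 failed     */
            int i = failed_at;

            /* fixed unwind: visits 2, 1, 0.  The old form visited 2, 1 only */
            while (--i >= 0)
                    unregister(i);

            return 0;
    }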
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index d59d2f2314af..a4be41614eeb 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -25,7 +25,7 @@ struct ad5398_chip_info {
25 unsigned int current_level; 25 unsigned int current_level;
26 unsigned int current_mask; 26 unsigned int current_mask;
27 unsigned int current_offset; 27 unsigned int current_offset;
28 struct regulator_dev rdev; 28 struct regulator_dev *rdev;
29}; 29};
30 30
31static int ad5398_calc_current(struct ad5398_chip_info *chip, 31static int ad5398_calc_current(struct ad5398_chip_info *chip,
@@ -211,7 +211,6 @@ MODULE_DEVICE_TABLE(i2c, ad5398_id);
211static int __devinit ad5398_probe(struct i2c_client *client, 211static int __devinit ad5398_probe(struct i2c_client *client,
212 const struct i2c_device_id *id) 212 const struct i2c_device_id *id)
213{ 213{
214 struct regulator_dev *rdev;
215 struct regulator_init_data *init_data = client->dev.platform_data; 214 struct regulator_init_data *init_data = client->dev.platform_data;
216 struct ad5398_chip_info *chip; 215 struct ad5398_chip_info *chip;
217 const struct ad5398_current_data_format *df = 216 const struct ad5398_current_data_format *df =
@@ -233,9 +232,10 @@ static int __devinit ad5398_probe(struct i2c_client *client,
233 chip->current_offset = df->current_offset; 232 chip->current_offset = df->current_offset;
234 chip->current_mask = (chip->current_level - 1) << chip->current_offset; 233 chip->current_mask = (chip->current_level - 1) << chip->current_offset;
235 234
236 rdev = regulator_register(&ad5398_reg, &client->dev, init_data, chip); 235 chip->rdev = regulator_register(&ad5398_reg, &client->dev,
237 if (IS_ERR(rdev)) { 236 init_data, chip);
238 ret = PTR_ERR(rdev); 237 if (IS_ERR(chip->rdev)) {
238 ret = PTR_ERR(chip->rdev);
239 dev_err(&client->dev, "failed to register %s %s\n", 239 dev_err(&client->dev, "failed to register %s %s\n",
240 id->name, ad5398_reg.name); 240 id->name, ad5398_reg.name);
241 goto err; 241 goto err;
@@ -254,9 +254,8 @@ static int __devexit ad5398_remove(struct i2c_client *client)
254{ 254{
255 struct ad5398_chip_info *chip = i2c_get_clientdata(client); 255 struct ad5398_chip_info *chip = i2c_get_clientdata(client);
256 256
257 regulator_unregister(&chip->rdev); 257 regulator_unregister(chip->rdev);
258 kfree(chip); 258 kfree(chip);
259 i2c_set_clientdata(client, NULL);
260 259
261 return 0; 260 return 0;
262} 261}
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 422a709d271d..cc8b337b9119 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -700,7 +700,7 @@ static void print_constraints(struct regulator_dev *rdev)
700 constraints->min_uA != constraints->max_uA) { 700 constraints->min_uA != constraints->max_uA) {
701 ret = _regulator_get_current_limit(rdev); 701 ret = _regulator_get_current_limit(rdev);
702 if (ret > 0) 702 if (ret > 0)
703 count += sprintf(buf + count, "at %d uA ", ret / 1000); 703 count += sprintf(buf + count, "at %d mA ", ret / 1000);
704 } 704 }
705 705
706 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) 706 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
@@ -2302,8 +2302,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
2302 dev_set_name(&rdev->dev, "regulator.%d", 2302 dev_set_name(&rdev->dev, "regulator.%d",
2303 atomic_inc_return(&regulator_no) - 1); 2303 atomic_inc_return(&regulator_no) - 1);
2304 ret = device_register(&rdev->dev); 2304 ret = device_register(&rdev->dev);
2305 if (ret != 0) 2305 if (ret != 0) {
2306 put_device(&rdev->dev);
2306 goto clean; 2307 goto clean;
2308 }
2307 2309
2308 dev_set_drvdata(&rdev->dev, rdev); 2310 dev_set_drvdata(&rdev->dev, rdev);
2309 2311
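The core.c hunk adds the customary put_device() when device_register() fails: once registration has been attempted, the struct device holds a reference that only its release callback may free, so the error path drops that reference instead of freeing the object behind it. A kernel-style sketch under that assumption, with widget/widget_add() as hypothetical names:

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>

    /* hypothetical wrapper object embedding a struct device */
    struct widget {
            struct device dev;
    };

    static void widget_release(struct device *dev)
    {
            kfree(container_of(dev, struct widget, dev));
    }

    static int widget_add(struct widget *w)
    {
            int ret;

            w->dev.release = widget_release;
            ret = device_register(&w->dev);
            if (ret) {
                    /* do not kfree() directly here: drop the reference taken
                     * by device_register() and let the release callback run */
                    put_device(&w->dev);
                    return ret;
            }
            return 0;
    }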
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index e49d2bd393f2..b8cc6389a541 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -165,7 +165,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c,
165 mutex_init(&pmic->mtx); 165 mutex_init(&pmic->mtx);
166 166
167 for (i = 0; i < 3; i++) { 167 for (i = 0; i < 3; i++) {
168 pmic->rdev[i] = regulator_register(&isl_rd[0], &i2c->dev, 168 pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
169 init_data, pmic); 169 init_data, pmic);
170 if (IS_ERR(pmic->rdev[i])) { 170 if (IS_ERR(pmic->rdev[i])) {
171 dev_err(&i2c->dev, "failed to register %s\n", id->name); 171 dev_err(&i2c->dev, "failed to register %s\n", id->name);
@@ -191,8 +191,6 @@ static int __devexit isl6271a_remove(struct i2c_client *i2c)
191 struct isl_pmic *pmic = i2c_get_clientdata(i2c); 191 struct isl_pmic *pmic = i2c_get_clientdata(i2c);
192 int i; 192 int i;
193 193
194 i2c_set_clientdata(i2c, NULL);
195
196 for (i = 0; i < 3; i++) 194 for (i = 0; i < 3; i++)
197 regulator_unregister(pmic->rdev[i]); 195 regulator_unregister(pmic->rdev[i]);
198 196
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 8867c2710a6d..559cfa271a44 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -121,14 +121,14 @@ static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV)
121 if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV) 121 if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV)
122 return -EINVAL; 122 return -EINVAL;
123 123
124 if (min_uV >= 3000000)
125 selector = 3;
126 if (min_uV < 3000000)
127 selector = 2;
128 if (min_uV < 2500000)
129 selector = 1;
130 if (min_uV < 1800000) 124 if (min_uV < 1800000)
131 selector = 0; 125 selector = 0;
126 else if (min_uV < 2500000)
127 selector = 1;
128 else if (min_uV < 3000000)
129 selector = 2;
130 else if (min_uV >= 3000000)
131 selector = 3;
132 132
133 if (max1586_v6_calc_voltage(selector) > max_uV) 133 if (max1586_v6_calc_voltage(selector) > max_uV)
134 return -EINVAL; 134 return -EINVAL;
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 4520ace3f7e7..6b60a9c0366b 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -330,7 +330,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
330 /* set external clock frequency */ 330 /* set external clock frequency */
331 info->extclk_freq = pdata->extclk_freq; 331 info->extclk_freq = pdata->extclk_freq;
332 max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK, 332 max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
333 info->extclk_freq); 333 info->extclk_freq << 6);
334 } 334 }
335 335
336 if (pdata->ramp_timing) { 336 if (pdata->ramp_timing) {
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index ab67298799f9..a1baf1fbe004 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -549,7 +549,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
549 if (!max8998) 549 if (!max8998)
550 return -ENOMEM; 550 return -ENOMEM;
551 551
552 size = sizeof(struct regulator_dev *) * (pdata->num_regulators + 1); 552 size = sizeof(struct regulator_dev *) * pdata->num_regulators;
553 max8998->rdev = kzalloc(size, GFP_KERNEL); 553 max8998->rdev = kzalloc(size, GFP_KERNEL);
554 if (!max8998->rdev) { 554 if (!max8998->rdev) {
555 kfree(max8998); 555 kfree(max8998);
@@ -557,7 +557,9 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
557 } 557 }
558 558
559 rdev = max8998->rdev; 559 rdev = max8998->rdev;
560 max8998->dev = &pdev->dev;
560 max8998->iodev = iodev; 561 max8998->iodev = iodev;
562 max8998->num_regulators = pdata->num_regulators;
561 platform_set_drvdata(pdev, max8998); 563 platform_set_drvdata(pdev, max8998);
562 564
563 for (i = 0; i < pdata->num_regulators; i++) { 565 for (i = 0; i < pdata->num_regulators; i++) {
@@ -583,7 +585,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
583 585
584 return 0; 586 return 0;
585err: 587err:
586 for (i = 0; i <= max8998->num_regulators; i++) 588 for (i = 0; i < max8998->num_regulators; i++)
587 if (rdev[i]) 589 if (rdev[i])
588 regulator_unregister(rdev[i]); 590 regulator_unregister(rdev[i]);
589 591
@@ -599,7 +601,7 @@ static int __devexit max8998_pmic_remove(struct platform_device *pdev)
599 struct regulator_dev **rdev = max8998->rdev; 601 struct regulator_dev **rdev = max8998->rdev;
600 int i; 602 int i;
601 603
602 for (i = 0; i <= max8998->num_regulators; i++) 604 for (i = 0; i < max8998->num_regulators; i++)
603 if (rdev[i]) 605 if (rdev[i])
604 regulator_unregister(rdev[i]); 606 regulator_unregister(rdev[i]);
605 607
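The max8998 changes size the rdev array at exactly num_regulators entries and make both the error unwind and remove() iterate with "<" instead of "<=", so the loops stop at the last valid slot; the count is also stored in the driver state so remove() reads a defined value. The bound in isolation, with a made-up count:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            int num_regulators = 4;         /* hypothetical count */
            /* old code allocated num_regulators + 1 pointers and walked
             * i <= num_regulators: one wasted slot plus an off-by-one walk */
            void **rdev = calloc(num_regulators, sizeof(*rdev));
            int i;

            if (!rdev)
                    return 1;

            for (i = 0; i < num_regulators; i++)    /* '<', not '<=' */
                    printf("slot %d\n", i);

            free(rdev);
            return 0;
    }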
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index c239f42aa4a3..020f5878d7ff 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -626,12 +626,6 @@ fail:
626 return error; 626 return error;
627} 627}
628 628
629/**
630 * tps6507x_remove - TPS6507x driver i2c remove handler
631 * @client: i2c driver client device structure
632 *
633 * Unregister TPS driver as an i2c client device driver
634 */
635static int __devexit tps6507x_pmic_remove(struct platform_device *pdev) 629static int __devexit tps6507x_pmic_remove(struct platform_device *pdev)
636{ 630{
637 struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev); 631 struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 8cff1413a147..51237fbb1bbb 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -133,7 +133,7 @@ static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
133 mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift; 133 mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
134 val = (val & mask) >> ri->volt_shift; 134 val = (val & mask) >> ri->volt_shift;
135 135
136 if (val > ri->desc.n_voltages) 136 if (val >= ri->desc.n_voltages)
137 BUG(); 137 BUG();
138 138
139 return ri->voltages[val] * 1000; 139 return ri->voltages[val] * 1000;
@@ -150,7 +150,7 @@ static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
150 if (ret) 150 if (ret)
151 return ret; 151 return ret;
152 152
153 return tps6586x_set_bits(parent, ri->go_reg, ri->go_bit); 153 return tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
154} 154}
155 155
156static int tps6586x_regulator_enable(struct regulator_dev *rdev) 156static int tps6586x_regulator_enable(struct regulator_dev *rdev)
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index e686cdb61b97..9edf8f692341 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -215,8 +215,7 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
215 215
216 case REGULATOR_MODE_IDLE: 216 case REGULATOR_MODE_IDLE:
217 ret = wm831x_set_bits(wm831x, ctrl_reg, 217 ret = wm831x_set_bits(wm831x, ctrl_reg,
218 WM831X_LDO1_LP_MODE, 218 WM831X_LDO1_LP_MODE, 0);
219 WM831X_LDO1_LP_MODE);
220 if (ret < 0) 219 if (ret < 0)
221 return ret; 220 return ret;
222 221
@@ -225,10 +224,12 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
225 WM831X_LDO1_ON_MODE); 224 WM831X_LDO1_ON_MODE);
226 if (ret < 0) 225 if (ret < 0)
227 return ret; 226 return ret;
227 break;
228 228
229 case REGULATOR_MODE_STANDBY: 229 case REGULATOR_MODE_STANDBY:
230 ret = wm831x_set_bits(wm831x, ctrl_reg, 230 ret = wm831x_set_bits(wm831x, ctrl_reg,
231 WM831X_LDO1_LP_MODE, 0); 231 WM831X_LDO1_LP_MODE,
232 WM831X_LDO1_LP_MODE);
232 if (ret < 0) 233 if (ret < 0)
233 return ret; 234 return ret;
234 235
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 0e6ed7db9364..fe4b8a8a9dfd 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1129,7 +1129,7 @@ static unsigned int wm8350_dcdc_get_mode(struct regulator_dev *rdev)
1129 mode = REGULATOR_MODE_NORMAL; 1129 mode = REGULATOR_MODE_NORMAL;
1130 } else if (!active && !sleep) 1130 } else if (!active && !sleep)
1131 mode = REGULATOR_MODE_IDLE; 1131 mode = REGULATOR_MODE_IDLE;
1132 else if (!sleep) 1132 else if (sleep)
1133 mode = REGULATOR_MODE_STANDBY; 1133 mode = REGULATOR_MODE_STANDBY;
1134 1134
1135 return mode; 1135 return mode;
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index d26780ea254b..261a07e0fb24 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -235,6 +235,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
235 err = PTR_ERR(rtc); 235 err = PTR_ERR(rtc);
236 return err; 236 return err;
237 } 237 }
238 platform_set_drvdata(pdev, rtc);
238 239
239 return 0; 240 return 0;
240} 241}
@@ -244,6 +245,7 @@ static int __exit ab3100_rtc_remove(struct platform_device *pdev)
244 struct rtc_device *rtc = platform_get_drvdata(pdev); 245 struct rtc_device *rtc = platform_get_drvdata(pdev);
245 246
246 rtc_device_unregister(rtc); 247 rtc_device_unregister(rtc);
248 platform_set_drvdata(pdev, NULL);
247 return 0; 249 return 0;
248} 250}
249 251
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 72b2bcc2c224..d4fb82d85e9b 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
426 enable_irq_wake(IRQ_RTC); 426 enable_irq_wake(IRQ_RTC);
427 bfin_rtc_sync_pending(&pdev->dev); 427 bfin_rtc_sync_pending(&pdev->dev);
428 } else 428 } else
429 bfin_rtc_int_clear(-1); 429 bfin_rtc_int_clear(0);
430 430
431 return 0; 431 return 0;
432} 432}
@@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev)
435{ 435{
436 if (device_may_wakeup(&pdev->dev)) 436 if (device_may_wakeup(&pdev->dev))
437 disable_irq_wake(IRQ_RTC); 437 disable_irq_wake(IRQ_RTC);
438 else 438
439 bfin_write_RTC_ISTAT(-1); 439 /*
440 * Since only some of the RTC bits are maintained externally in the
441 * Vbat domain, we need to wait for the RTC MMRs to be synced into
442 * the core after waking up. This happens every RTC 1HZ. Once that
443 * has happened, we can go ahead and re-enable the important write
444 * complete interrupt event.
445 */
446 while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC))
447 continue;
448 bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE);
440 449
441 return 0; 450 return 0;
442} 451}
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 9daed8db83d3..9de8516e3531 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -268,7 +268,6 @@ out_irq:
268 free_irq(client->irq, client); 268 free_irq(client->irq, client);
269 269
270out_free: 270out_free:
271 i2c_set_clientdata(client, NULL);
272 kfree(ds3232); 271 kfree(ds3232);
273 return ret; 272 return ret;
274} 273}
@@ -287,7 +286,6 @@ static int __devexit ds3232_remove(struct i2c_client *client)
287 } 286 }
288 287
289 rtc_device_unregister(ds3232->rtc); 288 rtc_device_unregister(ds3232->rtc);
290 i2c_set_clientdata(client, NULL);
291 kfree(ds3232); 289 kfree(ds3232);
292 return 0; 290 return 0;
293} 291}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 66377f3e28b8..d60557cae8ef 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -364,7 +364,7 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
364 t->time.tm_isdst = -1; 364 t->time.tm_isdst = -1;
365 t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE); 365 t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
366 t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF); 366 t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
367 return rtc_valid_tm(t); 367 return 0;
368} 368}
369 369
370static struct rtc_class_ops m41t80_rtc_ops = { 370static struct rtc_class_ops m41t80_rtc_ops = {
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 6c418fe7f288..b7a6690e5b35 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -403,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id)
403 } 403 }
404 404
405 if (request_irq(adev->irq[0], pl031_interrupt, 405 if (request_irq(adev->irq[0], pl031_interrupt,
406 IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) { 406 IRQF_DISABLED, "rtc-pl031", ldata)) {
407 ret = -EIO; 407 ret = -EIO;
408 goto out_no_irq; 408 goto out_no_irq;
409 } 409 }
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index a0d3ec89d412..f57a87f4ae96 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -310,11 +310,6 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
310 310
311 s3c_rtc_setaie(alrm->enabled); 311 s3c_rtc_setaie(alrm->enabled);
312 312
313 if (alrm->enabled)
314 enable_irq_wake(s3c_rtc_alarmno);
315 else
316 disable_irq_wake(s3c_rtc_alarmno);
317
318 return 0; 313 return 0;
319} 314}
320 315
@@ -587,6 +582,10 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
587 ticnt_en_save &= S3C64XX_RTCCON_TICEN; 582 ticnt_en_save &= S3C64XX_RTCCON_TICEN;
588 } 583 }
589 s3c_rtc_enable(pdev, 0); 584 s3c_rtc_enable(pdev, 0);
585
586 if (device_may_wakeup(&pdev->dev))
587 enable_irq_wake(s3c_rtc_alarmno);
588
590 return 0; 589 return 0;
591} 590}
592 591
@@ -600,6 +599,10 @@ static int s3c_rtc_resume(struct platform_device *pdev)
600 tmp = readb(s3c_rtc_base + S3C2410_RTCCON); 599 tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
601 writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); 600 writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
602 } 601 }
602
603 if (device_may_wakeup(&pdev->dev))
604 disable_irq_wake(s3c_rtc_alarmno);
605
603 return 0; 606 return 0;
604} 607}
605#else 608#else
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index c6cbcb3f925e..0e9a309b9669 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -16,12 +16,11 @@
16 16
17#ifdef CONFIG_MAGIC_SYSRQ 17#ifdef CONFIG_MAGIC_SYSRQ
18static int ctrlchar_sysrq_key; 18static int ctrlchar_sysrq_key;
19static struct tty_struct *sysrq_tty;
20 19
21static void 20static void
22ctrlchar_handle_sysrq(struct work_struct *work) 21ctrlchar_handle_sysrq(struct work_struct *work)
23{ 22{
24 handle_sysrq(ctrlchar_sysrq_key, sysrq_tty); 23 handle_sysrq(ctrlchar_sysrq_key);
25} 24}
26 25
27static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq); 26static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
@@ -54,7 +53,6 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
54 /* racy */ 53 /* racy */
55 if (len == 3 && buf[1] == '-') { 54 if (len == 3 && buf[1] == '-') {
56 ctrlchar_sysrq_key = buf[2]; 55 ctrlchar_sysrq_key = buf[2];
57 sysrq_tty = tty;
58 schedule_work(&ctrlchar_work); 56 schedule_work(&ctrlchar_work);
59 return CTRLCHAR_SYSRQ; 57 return CTRLCHAR_SYSRQ;
60 } 58 }
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 18d9a497863b..8cd58e412b5e 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -305,7 +305,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
305 if (kbd->sysrq) { 305 if (kbd->sysrq) {
306 if (kbd->sysrq == K(KT_LATIN, '-')) { 306 if (kbd->sysrq == K(KT_LATIN, '-')) {
307 kbd->sysrq = 0; 307 kbd->sysrq = 0;
308 handle_sysrq(value, kbd->tty); 308 handle_sysrq(value);
309 return; 309 return;
310 } 310 }
311 if (value == '-') { 311 if (value == '-') {
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index b7de02525ec9..85cf607fc78f 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -217,8 +217,7 @@ tapeblock_setup_device(struct tape_device * device)
217 if (!blkdat->request_queue) 217 if (!blkdat->request_queue)
218 return -ENOMEM; 218 return -ENOMEM;
219 219
220 elevator_exit(blkdat->request_queue->elevator); 220 rc = elevator_change(blkdat->request_queue, "noop");
221 rc = elevator_init(blkdat->request_queue, "noop");
222 if (rc) 221 if (rc)
223 goto cleanup_queue; 222 goto cleanup_queue;
224 223
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 6edf20b62de5..2c7d2d9be4d0 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1154,7 +1154,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
1154 dev_fsm, dev_fsm_len, GFP_KERNEL); 1154 dev_fsm, dev_fsm_len, GFP_KERNEL);
1155 if (priv->fsm == NULL) { 1155 if (priv->fsm == NULL) {
1156 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); 1156 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
1157 kfree(dev); 1157 free_netdev(dev);
1158 return NULL; 1158 return NULL;
1159 } 1159 }
1160 fsm_newstate(priv->fsm, DEV_STATE_STOPPED); 1160 fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
@@ -1165,7 +1165,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
1165 grp = ctcmpc_init_mpc_group(priv); 1165 grp = ctcmpc_init_mpc_group(priv);
1166 if (grp == NULL) { 1166 if (grp == NULL) {
1167 MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); 1167 MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
1168 kfree(dev); 1168 free_netdev(dev);
1169 return NULL; 1169 return NULL;
1170 } 1170 }
1171 tasklet_init(&grp->mpc_tasklet2, 1171 tasklet_init(&grp->mpc_tasklet2,
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 7d4d2275573c..7f11f3e48e12 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -300,8 +300,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
300 enum iscsi_host_param param, char *buf) 300 enum iscsi_host_param param, char *buf)
301{ 301{
302 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); 302 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
303 int len = 0; 303 int status = 0;
304 int status;
305 304
306 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); 305 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
307 switch (param) { 306 switch (param) {
@@ -315,7 +314,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
315 default: 314 default:
316 return iscsi_host_get_param(shost, param, buf); 315 return iscsi_host_get_param(shost, param, buf);
317 } 316 }
318 return len; 317 return status;
319} 318}
320 319
321int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) 320int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 26350e470bcc..877324fc594c 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -368,7 +368,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
368 memset(req, 0, sizeof(*req)); 368 memset(req, 0, sizeof(*req));
369 wrb->tag0 |= tag; 369 wrb->tag0 |= tag;
370 370
371 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1); 371 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
372 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, 372 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
373 OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, 373 OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
374 sizeof(*req)); 374 sizeof(*req));
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index cd05e049d5f6..d0c82340f0e2 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1404,13 +1404,13 @@ void scsi_print_sense(char *name, struct scsi_cmnd *cmd)
1404{ 1404{
1405 struct scsi_sense_hdr sshdr; 1405 struct scsi_sense_hdr sshdr;
1406 1406
1407 scmd_printk(KERN_INFO, cmd, ""); 1407 scmd_printk(KERN_INFO, cmd, " ");
1408 scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, 1408 scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
1409 &sshdr); 1409 &sshdr);
1410 scsi_show_sense_hdr(&sshdr); 1410 scsi_show_sense_hdr(&sshdr);
1411 scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, 1411 scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
1412 &sshdr); 1412 &sshdr);
1413 scmd_printk(KERN_INFO, cmd, ""); 1413 scmd_printk(KERN_INFO, cmd, " ");
1414 scsi_show_extd_sense(sshdr.asc, sshdr.ascq); 1414 scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
1415} 1415}
1416EXPORT_SYMBOL(scsi_print_sense); 1416EXPORT_SYMBOL(scsi_print_sense);
@@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(scsi_show_result);
1453 1453
1454void scsi_print_result(struct scsi_cmnd *cmd) 1454void scsi_print_result(struct scsi_cmnd *cmd)
1455{ 1455{
1456 scmd_printk(KERN_INFO, cmd, ""); 1456 scmd_printk(KERN_INFO, cmd, " ");
1457 scsi_show_result(cmd->result); 1457 scsi_show_result(cmd->result);
1458} 1458}
1459EXPORT_SYMBOL(scsi_print_result); 1459EXPORT_SYMBOL(scsi_print_result);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4f5551b5fe53..c5d0606ad097 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3231,6 +3231,12 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3231 misc_fw_support = readl(&cfgtable->misc_fw_support); 3231 misc_fw_support = readl(&cfgtable->misc_fw_support);
3232 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 3232 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3233 3233
3234 /* The doorbell reset seems to cause lockups on some Smart
3235 * Arrays (e.g. P410, P410i, maybe others). Until this is
3236 * fixed or at least isolated, avoid the doorbell reset.
3237 */
3238 use_doorbell = 0;
3239
3234 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 3240 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3235 if (rc) 3241 if (rc)
3236 goto unmap_cfgtable; 3242 goto unmap_cfgtable;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index fda4de3440c4..e88bbdde49c5 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or,
865{ 865{
866 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); 866 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
867 WARN_ON(or->in.bio || or->in.total_bytes); 867 WARN_ON(or->in.bio || or->in.total_bytes);
868 WARN_ON(1 == (bio->bi_rw & REQ_WRITE)); 868 WARN_ON(bio->bi_rw & REQ_WRITE);
869 or->in.bio = bio; 869 or->in.bio = bio;
870 or->in.total_bytes = len; 870 or->in.total_bytes = len;
871} 871}
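
Note: the osd_initiator change drops the "1 ==" comparison because REQ_WRITE is a flag bit, not necessarily bit 0, so comparing the masked value against 1 can be false even when the flag is set; the bare mask is the correct test. A tiny self-contained demonstration (MY_FLAG_WRITE is an arbitrary value chosen for the example):

        #include <assert.h>

        #define MY_FLAG_WRITE (1u << 3)         /* some non-zero bit position */

        int main(void)
        {
                unsigned int rw = MY_FLAG_WRITE;

                /* wrong: compares the masked value against 1, not against "set" */
                assert(!(1 == (rw & MY_FLAG_WRITE)));

                /* right: any non-zero masked value means the flag is set */
                assert(rw & MY_FLAG_WRITE);
                return 0;
        }
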
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 420238cc794e..114bc5a81171 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1838,26 +1838,33 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1838 1838
1839 qla24xx_disable_vp(vha); 1839 qla24xx_disable_vp(vha);
1840 1840
1841 vha->flags.delete_progress = 1;
1842
1841 fc_remove_host(vha->host); 1843 fc_remove_host(vha->host);
1842 1844
1843 scsi_remove_host(vha->host); 1845 scsi_remove_host(vha->host);
1844 1846
1845 qla2x00_free_fcports(vha); 1847 if (vha->timer_active) {
1848 qla2x00_vp_stop_timer(vha);
1849 DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
1850 " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
1851 }
1846 1852
1847 qla24xx_deallocate_vp_id(vha); 1853 qla24xx_deallocate_vp_id(vha);
1848 1854
1855 /* No pending activities shall be there on the vha now */
1856 DEBUG(msleep(random32()%10)); /* Just to see if something falls on
1857 * the net we have placed below */
1858
1859 BUG_ON(atomic_read(&vha->vref_count));
1860
1861 qla2x00_free_fcports(vha);
1862
1849 mutex_lock(&ha->vport_lock); 1863 mutex_lock(&ha->vport_lock);
1850 ha->cur_vport_count--; 1864 ha->cur_vport_count--;
1851 clear_bit(vha->vp_idx, ha->vp_idx_map); 1865 clear_bit(vha->vp_idx, ha->vp_idx_map);
1852 mutex_unlock(&ha->vport_lock); 1866 mutex_unlock(&ha->vport_lock);
1853 1867
1854 if (vha->timer_active) {
1855 qla2x00_vp_stop_timer(vha);
1856 DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
1857 "has stopped\n",
1858 vha->host_no, vha->vp_idx, vha));
1859 }
1860
1861 if (vha->req->id && !ha->flags.cpu_affinity_enabled) { 1868 if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1862 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) 1869 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1863 qla_printk(KERN_WARNING, ha, 1870 qla_printk(KERN_WARNING, ha,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 6cfc28a25eb3..b74e6b5743dc 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -29,8 +29,6 @@
29/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ 29/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
30/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */ 30/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
31 31
32/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */
33
34/* 32/*
35* Macros use for debugging the driver. 33* Macros use for debugging the driver.
36*/ 34*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 3a432ea0c7a3..d2a4e1530708 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2641,6 +2641,7 @@ struct qla_hw_data {
2641#define MBX_UPDATE_FLASH_ACTIVE 3 2641#define MBX_UPDATE_FLASH_ACTIVE 3
2642 2642
2643 struct mutex vport_lock; /* Virtual port synchronization */ 2643 struct mutex vport_lock; /* Virtual port synchronization */
2644 spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
2644 struct completion mbx_cmd_comp; /* Serialize mbx access */ 2645 struct completion mbx_cmd_comp; /* Serialize mbx access */
2645 struct completion mbx_intr_comp; /* Used for completion notification */ 2646 struct completion mbx_intr_comp; /* Used for completion notification */
2646 struct completion dcbx_comp; /* For set port config notification */ 2647 struct completion dcbx_comp; /* For set port config notification */
@@ -2828,6 +2829,7 @@ typedef struct scsi_qla_host {
2828 uint32_t management_server_logged_in :1; 2829 uint32_t management_server_logged_in :1;
2829 uint32_t process_response_queue :1; 2830 uint32_t process_response_queue :1;
2830 uint32_t difdix_supported:1; 2831 uint32_t difdix_supported:1;
2832 uint32_t delete_progress:1;
2831 } flags; 2833 } flags;
2832 2834
2833 atomic_t loop_state; 2835 atomic_t loop_state;
@@ -2922,6 +2924,8 @@ typedef struct scsi_qla_host {
2922 struct req_que *req; 2924 struct req_que *req;
2923 int fw_heartbeat_counter; 2925 int fw_heartbeat_counter;
2924 int seconds_since_last_heartbeat; 2926 int seconds_since_last_heartbeat;
2927
2928 atomic_t vref_count;
2925} scsi_qla_host_t; 2929} scsi_qla_host_t;
2926 2930
2927/* 2931/*
@@ -2932,6 +2936,22 @@ typedef struct scsi_qla_host {
2932 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \ 2936 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
2933 atomic_read(&ha->loop_state) == LOOP_DOWN) 2937 atomic_read(&ha->loop_state) == LOOP_DOWN)
2934 2938
2939#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
2940 atomic_inc(&__vha->vref_count); \
2941 mb(); \
2942 if (__vha->flags.delete_progress) { \
2943 atomic_dec(&__vha->vref_count); \
2944 __bail = 1; \
2945 } else { \
2946 __bail = 0; \
2947 } \
2948} while (0)
2949
2950#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
2951 atomic_dec(&__vha->vref_count); \
2952} while (0)
2953
2954
2935#define qla_printk(level, ha, format, arg...) \ 2955#define qla_printk(level, ha, format, arg...) \
2936 dev_printk(level , &((ha)->pdev->dev) , format , ## arg) 2956 dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
2937 2957
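
Note: the QLA_VHA_MARK_BUSY/QLA_VHA_MARK_NOT_BUSY pair added here is a "take a reference unless deletion has started" guard: bump vref_count, re-check delete_progress, and back out if the delete has begun. Callers later in this series (qla2x00_get_ctx_sp(), qla2x00_alloc_work()) bracket their work with it. A rough sketch of the caller side; my_guarded_op() and do_vport_work() are illustrative names, not driver functions:

        static void my_guarded_op(scsi_qla_host_t *vha)
        {
                uint8_t bail;

                QLA_VHA_MARK_BUSY(vha, bail);   /* inc vref_count, re-check delete_progress */
                if (bail)
                        return;                 /* vport delete already started: leave it alone */

                do_vport_work(vha);             /* placeholder for the work being protected */

                QLA_VHA_MARK_NOT_BUSY(vha);     /* lets qla24xx_deallocate_vp_id() drain and proceed */
        }
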
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d863ed2619b5..9c383baebe27 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -69,21 +69,29 @@ qla2x00_ctx_sp_free(srb_t *sp)
69{ 69{
70 struct srb_ctx *ctx = sp->ctx; 70 struct srb_ctx *ctx = sp->ctx;
71 struct srb_iocb *iocb = ctx->u.iocb_cmd; 71 struct srb_iocb *iocb = ctx->u.iocb_cmd;
72 struct scsi_qla_host *vha = sp->fcport->vha;
72 73
73 del_timer_sync(&iocb->timer); 74 del_timer_sync(&iocb->timer);
74 kfree(iocb); 75 kfree(iocb);
75 kfree(ctx); 76 kfree(ctx);
76 mempool_free(sp, sp->fcport->vha->hw->srb_mempool); 77 mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
78
79 QLA_VHA_MARK_NOT_BUSY(vha);
77} 80}
78 81
79inline srb_t * 82inline srb_t *
80qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, 83qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
81 unsigned long tmo) 84 unsigned long tmo)
82{ 85{
83 srb_t *sp; 86 srb_t *sp = NULL;
84 struct qla_hw_data *ha = vha->hw; 87 struct qla_hw_data *ha = vha->hw;
85 struct srb_ctx *ctx; 88 struct srb_ctx *ctx;
86 struct srb_iocb *iocb; 89 struct srb_iocb *iocb;
90 uint8_t bail;
91
92 QLA_VHA_MARK_BUSY(vha, bail);
93 if (bail)
94 return NULL;
87 95
88 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); 96 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
89 if (!sp) 97 if (!sp)
@@ -116,6 +124,8 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
116 iocb->timer.function = qla2x00_ctx_sp_timeout; 124 iocb->timer.function = qla2x00_ctx_sp_timeout;
117 add_timer(&iocb->timer); 125 add_timer(&iocb->timer);
118done: 126done:
127 if (!sp)
128 QLA_VHA_MARK_NOT_BUSY(vha);
119 return sp; 129 return sp;
120} 130}
121 131
@@ -1777,11 +1787,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1777 qla2x00_init_response_q_entries(rsp); 1787 qla2x00_init_response_q_entries(rsp);
1778 } 1788 }
1779 1789
1790 spin_lock_irqsave(&ha->vport_slock, flags);
1780 /* Clear RSCN queue. */ 1791 /* Clear RSCN queue. */
1781 list_for_each_entry(vp, &ha->vp_list, list) { 1792 list_for_each_entry(vp, &ha->vp_list, list) {
1782 vp->rscn_in_ptr = 0; 1793 vp->rscn_in_ptr = 0;
1783 vp->rscn_out_ptr = 0; 1794 vp->rscn_out_ptr = 0;
1784 } 1795 }
1796
1797 spin_unlock_irqrestore(&ha->vport_slock, flags);
1798
1785 ha->isp_ops->config_rings(vha); 1799 ha->isp_ops->config_rings(vha);
1786 1800
1787 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1801 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3218,12 +3232,17 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3218 /* Bypass virtual ports of the same host. */ 3232 /* Bypass virtual ports of the same host. */
3219 found = 0; 3233 found = 0;
3220 if (ha->num_vhosts) { 3234 if (ha->num_vhosts) {
3235 unsigned long flags;
3236
3237 spin_lock_irqsave(&ha->vport_slock, flags);
3221 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 3238 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3222 if (new_fcport->d_id.b24 == vp->d_id.b24) { 3239 if (new_fcport->d_id.b24 == vp->d_id.b24) {
3223 found = 1; 3240 found = 1;
3224 break; 3241 break;
3225 } 3242 }
3226 } 3243 }
3244 spin_unlock_irqrestore(&ha->vport_slock, flags);
3245
3227 if (found) 3246 if (found)
3228 continue; 3247 continue;
3229 } 3248 }
@@ -3343,6 +3362,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3343 struct qla_hw_data *ha = vha->hw; 3362 struct qla_hw_data *ha = vha->hw;
3344 struct scsi_qla_host *vp; 3363 struct scsi_qla_host *vp;
3345 struct scsi_qla_host *tvp; 3364 struct scsi_qla_host *tvp;
3365 unsigned long flags = 0;
3346 3366
3347 rval = QLA_SUCCESS; 3367 rval = QLA_SUCCESS;
3348 3368
@@ -3367,6 +3387,8 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3367 /* Check for loop ID being already in use. */ 3387 /* Check for loop ID being already in use. */
3368 found = 0; 3388 found = 0;
3369 fcport = NULL; 3389 fcport = NULL;
3390
3391 spin_lock_irqsave(&ha->vport_slock, flags);
3370 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 3392 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3371 list_for_each_entry(fcport, &vp->vp_fcports, list) { 3393 list_for_each_entry(fcport, &vp->vp_fcports, list) {
3372 if (fcport->loop_id == dev->loop_id && 3394 if (fcport->loop_id == dev->loop_id &&
@@ -3379,6 +3401,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3379 if (found) 3401 if (found)
3380 break; 3402 break;
3381 } 3403 }
3404 spin_unlock_irqrestore(&ha->vport_slock, flags);
3382 3405
3383 /* If not in use then it is free to use. */ 3406 /* If not in use then it is free to use. */
3384 if (!found) { 3407 if (!found) {
@@ -3791,14 +3814,27 @@ void
3791qla2x00_update_fcports(scsi_qla_host_t *base_vha) 3814qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3792{ 3815{
3793 fc_port_t *fcport; 3816 fc_port_t *fcport;
3794 struct scsi_qla_host *tvp, *vha; 3817 struct scsi_qla_host *vha;
3818 struct qla_hw_data *ha = base_vha->hw;
3819 unsigned long flags;
3795 3820
3821 spin_lock_irqsave(&ha->vport_slock, flags);
3796 /* Go with deferred removal of rport references. */ 3822 /* Go with deferred removal of rport references. */
3797 list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) 3823 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
3798 list_for_each_entry(fcport, &vha->vp_fcports, list) 3824 atomic_inc(&vha->vref_count);
3825 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3799 if (fcport && fcport->drport && 3826 if (fcport && fcport->drport &&
3800 atomic_read(&fcport->state) != FCS_UNCONFIGURED) 3827 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
3828 spin_unlock_irqrestore(&ha->vport_slock, flags);
3829
3801 qla2x00_rport_del(fcport); 3830 qla2x00_rport_del(fcport);
3831
3832 spin_lock_irqsave(&ha->vport_slock, flags);
3833 }
3834 }
3835 atomic_dec(&vha->vref_count);
3836 }
3837 spin_unlock_irqrestore(&ha->vport_slock, flags);
3802} 3838}
3803 3839
3804void 3840void
@@ -3806,7 +3842,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3806{ 3842{
3807 struct qla_hw_data *ha = vha->hw; 3843 struct qla_hw_data *ha = vha->hw;
3808 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); 3844 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
3809 struct scsi_qla_host *tvp; 3845 unsigned long flags;
3810 3846
3811 vha->flags.online = 0; 3847 vha->flags.online = 0;
3812 ha->flags.chip_reset_done = 0; 3848 ha->flags.chip_reset_done = 0;
@@ -3824,8 +3860,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3824 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 3860 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3825 atomic_set(&vha->loop_state, LOOP_DOWN); 3861 atomic_set(&vha->loop_state, LOOP_DOWN);
3826 qla2x00_mark_all_devices_lost(vha, 0); 3862 qla2x00_mark_all_devices_lost(vha, 0);
3827 list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list) 3863
3864 spin_lock_irqsave(&ha->vport_slock, flags);
3865 list_for_each_entry(vp, &base_vha->hw->vp_list, list) {
3866 atomic_inc(&vp->vref_count);
3867 spin_unlock_irqrestore(&ha->vport_slock, flags);
3868
3828 qla2x00_mark_all_devices_lost(vp, 0); 3869 qla2x00_mark_all_devices_lost(vp, 0);
3870
3871 spin_lock_irqsave(&ha->vport_slock, flags);
3872 atomic_dec(&vp->vref_count);
3873 }
3874 spin_unlock_irqrestore(&ha->vport_slock, flags);
3829 } else { 3875 } else {
3830 if (!atomic_read(&vha->loop_down_timer)) 3876 if (!atomic_read(&vha->loop_down_timer))
3831 atomic_set(&vha->loop_down_timer, 3877 atomic_set(&vha->loop_down_timer,
@@ -3862,8 +3908,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
3862 uint8_t status = 0; 3908 uint8_t status = 0;
3863 struct qla_hw_data *ha = vha->hw; 3909 struct qla_hw_data *ha = vha->hw;
3864 struct scsi_qla_host *vp; 3910 struct scsi_qla_host *vp;
3865 struct scsi_qla_host *tvp;
3866 struct req_que *req = ha->req_q_map[0]; 3911 struct req_que *req = ha->req_q_map[0];
3912 unsigned long flags;
3867 3913
3868 if (vha->flags.online) { 3914 if (vha->flags.online) {
3869 qla2x00_abort_isp_cleanup(vha); 3915 qla2x00_abort_isp_cleanup(vha);
@@ -3970,10 +4016,21 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
3970 DEBUG(printk(KERN_INFO 4016 DEBUG(printk(KERN_INFO
3971 "qla2x00_abort_isp(%ld): succeeded.\n", 4017 "qla2x00_abort_isp(%ld): succeeded.\n",
3972 vha->host_no)); 4018 vha->host_no));
3973 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 4019
3974 if (vp->vp_idx) 4020 spin_lock_irqsave(&ha->vport_slock, flags);
4021 list_for_each_entry(vp, &ha->vp_list, list) {
4022 if (vp->vp_idx) {
4023 atomic_inc(&vp->vref_count);
4024 spin_unlock_irqrestore(&ha->vport_slock, flags);
4025
3975 qla2x00_vp_abort_isp(vp); 4026 qla2x00_vp_abort_isp(vp);
4027
4028 spin_lock_irqsave(&ha->vport_slock, flags);
4029 atomic_dec(&vp->vref_count);
4030 }
3976 } 4031 }
4032 spin_unlock_irqrestore(&ha->vport_slock, flags);
4033
3977 } else { 4034 } else {
3978 qla_printk(KERN_INFO, ha, 4035 qla_printk(KERN_INFO, ha,
3979 "qla2x00_abort_isp: **** FAILED ****\n"); 4036 "qla2x00_abort_isp: **** FAILED ****\n");
@@ -5185,7 +5242,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5185 struct req_que *req = ha->req_q_map[0]; 5242 struct req_que *req = ha->req_q_map[0];
5186 struct rsp_que *rsp = ha->rsp_q_map[0]; 5243 struct rsp_que *rsp = ha->rsp_q_map[0];
5187 struct scsi_qla_host *vp; 5244 struct scsi_qla_host *vp;
5188 struct scsi_qla_host *tvp; 5245 unsigned long flags;
5189 5246
5190 status = qla2x00_init_rings(vha); 5247 status = qla2x00_init_rings(vha);
5191 if (!status) { 5248 if (!status) {
@@ -5272,10 +5329,21 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5272 DEBUG(printk(KERN_INFO 5329 DEBUG(printk(KERN_INFO
5273 "qla82xx_restart_isp(%ld): succeeded.\n", 5330 "qla82xx_restart_isp(%ld): succeeded.\n",
5274 vha->host_no)); 5331 vha->host_no));
5275 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 5332
5276 if (vp->vp_idx) 5333 spin_lock_irqsave(&ha->vport_slock, flags);
5334 list_for_each_entry(vp, &ha->vp_list, list) {
5335 if (vp->vp_idx) {
5336 atomic_inc(&vp->vref_count);
5337 spin_unlock_irqrestore(&ha->vport_slock, flags);
5338
5277 qla2x00_vp_abort_isp(vp); 5339 qla2x00_vp_abort_isp(vp);
5340
5341 spin_lock_irqsave(&ha->vport_slock, flags);
5342 atomic_dec(&vp->vref_count);
5343 }
5278 } 5344 }
5345 spin_unlock_irqrestore(&ha->vport_slock, flags);
5346
5279 } else { 5347 } else {
5280 qla_printk(KERN_INFO, ha, 5348 qla_printk(KERN_INFO, ha,
5281 "qla82xx_restart_isp: **** FAILED ****\n"); 5349 "qla82xx_restart_isp: **** FAILED ****\n");
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6982ba70e12a..28f65be19dad 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1706,19 +1706,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1706 cp->result = DID_ERROR << 16; 1706 cp->result = DID_ERROR << 16;
1707 break; 1707 break;
1708 } 1708 }
1709 } else if (!lscsi_status) { 1709 } else {
1710 DEBUG2(qla_printk(KERN_INFO, ha, 1710 DEBUG2(qla_printk(KERN_INFO, ha,
1711 "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " 1711 "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
1712 "of 0x%x bytes).\n", vha->host_no, cp->device->id, 1712 "of 0x%x bytes).\n", vha->host_no, cp->device->id,
1713 cp->device->lun, resid, scsi_bufflen(cp))); 1713 cp->device->lun, resid, scsi_bufflen(cp)));
1714 1714
1715 cp->result = DID_ERROR << 16; 1715 cp->result = DID_ERROR << 16 | lscsi_status;
1716 break; 1716 goto check_scsi_status;
1717 } 1717 }
1718 1718
1719 cp->result = DID_OK << 16 | lscsi_status; 1719 cp->result = DID_OK << 16 | lscsi_status;
1720 logit = 0; 1720 logit = 0;
1721 1721
1722check_scsi_status:
1722 /* 1723 /*
1723 * Check to see if SCSI Status is non zero. If so report SCSI 1724 * Check to see if SCSI Status is non zero. If so report SCSI
1724 * Status. 1725 * Status.
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 6009b0c69488..a595ec8264f8 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2913,7 +2913,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2913 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); 2913 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
2914 struct qla_hw_data *ha = vha->hw; 2914 struct qla_hw_data *ha = vha->hw;
2915 scsi_qla_host_t *vp; 2915 scsi_qla_host_t *vp;
2916 scsi_qla_host_t *tvp; 2916 unsigned long flags;
2917 2917
2918 if (rptid_entry->entry_status != 0) 2918 if (rptid_entry->entry_status != 0)
2919 return; 2919 return;
@@ -2945,9 +2945,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2945 return; 2945 return;
2946 } 2946 }
2947 2947
2948 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) 2948 spin_lock_irqsave(&ha->vport_slock, flags);
2949 list_for_each_entry(vp, &ha->vp_list, list)
2949 if (vp_idx == vp->vp_idx) 2950 if (vp_idx == vp->vp_idx)
2950 break; 2951 break;
2952 spin_unlock_irqrestore(&ha->vport_slock, flags);
2953
2951 if (!vp) 2954 if (!vp)
2952 return; 2955 return;
2953 2956
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 987c5b0ca78e..2b69392a71a1 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -30,6 +30,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
30{ 30{
31 uint32_t vp_id; 31 uint32_t vp_id;
32 struct qla_hw_data *ha = vha->hw; 32 struct qla_hw_data *ha = vha->hw;
33 unsigned long flags;
33 34
34 /* Find an empty slot and assign an vp_id */ 35 /* Find an empty slot and assign an vp_id */
35 mutex_lock(&ha->vport_lock); 36 mutex_lock(&ha->vport_lock);
@@ -44,7 +45,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
44 set_bit(vp_id, ha->vp_idx_map); 45 set_bit(vp_id, ha->vp_idx_map);
45 ha->num_vhosts++; 46 ha->num_vhosts++;
46 vha->vp_idx = vp_id; 47 vha->vp_idx = vp_id;
48
49 spin_lock_irqsave(&ha->vport_slock, flags);
47 list_add_tail(&vha->list, &ha->vp_list); 50 list_add_tail(&vha->list, &ha->vp_list);
51 spin_unlock_irqrestore(&ha->vport_slock, flags);
52
48 mutex_unlock(&ha->vport_lock); 53 mutex_unlock(&ha->vport_lock);
49 return vp_id; 54 return vp_id;
50} 55}
@@ -54,12 +59,31 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
54{ 59{
55 uint16_t vp_id; 60 uint16_t vp_id;
56 struct qla_hw_data *ha = vha->hw; 61 struct qla_hw_data *ha = vha->hw;
62 unsigned long flags = 0;
57 63
58 mutex_lock(&ha->vport_lock); 64 mutex_lock(&ha->vport_lock);
65 /*
66 * Wait for all pending activities to finish before removing vport from
67 * the list.
68 * Lock needs to be held for safe removal from the list (it
69 * ensures no active vp_list traversal while the vport is removed
70 * from the queue)
71 */
72 spin_lock_irqsave(&ha->vport_slock, flags);
73 while (atomic_read(&vha->vref_count)) {
74 spin_unlock_irqrestore(&ha->vport_slock, flags);
75
76 msleep(500);
77
78 spin_lock_irqsave(&ha->vport_slock, flags);
79 }
80 list_del(&vha->list);
81 spin_unlock_irqrestore(&ha->vport_slock, flags);
82
59 vp_id = vha->vp_idx; 83 vp_id = vha->vp_idx;
60 ha->num_vhosts--; 84 ha->num_vhosts--;
61 clear_bit(vp_id, ha->vp_idx_map); 85 clear_bit(vp_id, ha->vp_idx_map);
62 list_del(&vha->list); 86
63 mutex_unlock(&ha->vport_lock); 87 mutex_unlock(&ha->vport_lock);
64} 88}
65 89
@@ -68,12 +92,17 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
68{ 92{
69 scsi_qla_host_t *vha; 93 scsi_qla_host_t *vha;
70 struct scsi_qla_host *tvha; 94 struct scsi_qla_host *tvha;
95 unsigned long flags;
71 96
97 spin_lock_irqsave(&ha->vport_slock, flags);
72 /* Locate matching device in database. */ 98 /* Locate matching device in database. */
73 list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { 99 list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
74 if (!memcmp(port_name, vha->port_name, WWN_SIZE)) 100 if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
101 spin_unlock_irqrestore(&ha->vport_slock, flags);
75 return vha; 102 return vha;
103 }
76 } 104 }
105 spin_unlock_irqrestore(&ha->vport_slock, flags);
77 return NULL; 106 return NULL;
78} 107}
79 108
@@ -93,6 +122,12 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
93static void 122static void
94qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) 123qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
95{ 124{
125 /*
126 * !!! NOTE !!!
127 * This function, if called in contexts other than vp create, disable
128 * or delete, please make sure this is synchronized with the
129 * delete thread.
130 */
96 fc_port_t *fcport; 131 fc_port_t *fcport;
97 132
98 list_for_each_entry(fcport, &vha->vp_fcports, list) { 133 list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -100,7 +135,6 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
100 "loop_id=0x%04x :%x\n", 135 "loop_id=0x%04x :%x\n",
101 vha->host_no, fcport->loop_id, fcport->vp_idx)); 136 vha->host_no, fcport->loop_id, fcport->vp_idx));
102 137
103 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
104 qla2x00_mark_device_lost(vha, fcport, 0, 0); 138 qla2x00_mark_device_lost(vha, fcport, 0, 0);
105 atomic_set(&fcport->state, FCS_UNCONFIGURED); 139 atomic_set(&fcport->state, FCS_UNCONFIGURED);
106 } 140 }
@@ -194,12 +228,17 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
194void 228void
195qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) 229qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
196{ 230{
197 scsi_qla_host_t *vha, *tvha; 231 scsi_qla_host_t *vha;
198 struct qla_hw_data *ha = rsp->hw; 232 struct qla_hw_data *ha = rsp->hw;
199 int i = 0; 233 int i = 0;
234 unsigned long flags;
200 235
201 list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { 236 spin_lock_irqsave(&ha->vport_slock, flags);
237 list_for_each_entry(vha, &ha->vp_list, list) {
202 if (vha->vp_idx) { 238 if (vha->vp_idx) {
239 atomic_inc(&vha->vref_count);
240 spin_unlock_irqrestore(&ha->vport_slock, flags);
241
203 switch (mb[0]) { 242 switch (mb[0]) {
204 case MBA_LIP_OCCURRED: 243 case MBA_LIP_OCCURRED:
205 case MBA_LOOP_UP: 244 case MBA_LOOP_UP:
@@ -215,9 +254,13 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
215 qla2x00_async_event(vha, rsp, mb); 254 qla2x00_async_event(vha, rsp, mb);
216 break; 255 break;
217 } 256 }
257
258 spin_lock_irqsave(&ha->vport_slock, flags);
259 atomic_dec(&vha->vref_count);
218 } 260 }
219 i++; 261 i++;
220 } 262 }
263 spin_unlock_irqrestore(&ha->vport_slock, flags);
221} 264}
222 265
223int 266int
@@ -297,7 +340,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
297 int ret; 340 int ret;
298 struct qla_hw_data *ha = vha->hw; 341 struct qla_hw_data *ha = vha->hw;
299 scsi_qla_host_t *vp; 342 scsi_qla_host_t *vp;
300 struct scsi_qla_host *tvp; 343 unsigned long flags = 0;
301 344
302 if (vha->vp_idx) 345 if (vha->vp_idx)
303 return; 346 return;
@@ -309,10 +352,19 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
309 if (!(ha->current_topology & ISP_CFG_F)) 352 if (!(ha->current_topology & ISP_CFG_F))
310 return; 353 return;
311 354
312 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 355 spin_lock_irqsave(&ha->vport_slock, flags);
313 if (vp->vp_idx) 356 list_for_each_entry(vp, &ha->vp_list, list) {
357 if (vp->vp_idx) {
358 atomic_inc(&vp->vref_count);
359 spin_unlock_irqrestore(&ha->vport_slock, flags);
360
314 ret = qla2x00_do_dpc_vp(vp); 361 ret = qla2x00_do_dpc_vp(vp);
362
363 spin_lock_irqsave(&ha->vport_slock, flags);
364 atomic_dec(&vp->vref_count);
365 }
315 } 366 }
367 spin_unlock_irqrestore(&ha->vport_slock, flags);
316} 368}
317 369
318int 370int
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 915b77a6e193..0a71cc71eab2 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2672,6 +2672,19 @@ qla82xx_start_scsi(srb_t *sp)
2672sufficient_dsds: 2672sufficient_dsds:
2673 req_cnt = 1; 2673 req_cnt = 1;
2674 2674
2675 if (req->cnt < (req_cnt + 2)) {
2676 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2677 &reg->req_q_out[0]);
2678 if (req->ring_index < cnt)
2679 req->cnt = cnt - req->ring_index;
2680 else
2681 req->cnt = req->length -
2682 (req->ring_index - cnt);
2683 }
2684
2685 if (req->cnt < (req_cnt + 2))
2686 goto queuing_error;
2687
2675 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2688 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2676 if (!sp->ctx) { 2689 if (!sp->ctx) {
2677 DEBUG(printk(KERN_INFO 2690 DEBUG(printk(KERN_INFO
@@ -3307,16 +3320,19 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3307 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3320 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3308 } 3321 }
3309 qla2xxx_wake_dpc(vha); 3322 qla2xxx_wake_dpc(vha);
3323 ha->flags.fw_hung = 1;
3310 if (ha->flags.mbox_busy) { 3324 if (ha->flags.mbox_busy) {
3311 ha->flags.fw_hung = 1;
3312 ha->flags.mbox_int = 1; 3325 ha->flags.mbox_int = 1;
3313 DEBUG2(qla_printk(KERN_ERR, ha, 3326 DEBUG2(qla_printk(KERN_ERR, ha,
3314 "Due to fw hung, doing premature " 3327 "Due to fw hung, doing premature "
3315 "completion of mbx command\n")); 3328 "completion of mbx command\n"));
3316 complete(&ha->mbx_intr_comp); 3329 if (test_bit(MBX_INTR_WAIT,
3330 &ha->mbx_cmd_flags))
3331 complete(&ha->mbx_intr_comp);
3317 } 3332 }
3318 } 3333 }
3319 } 3334 } else
3335 vha->seconds_since_last_heartbeat = 0;
3320 vha->fw_heartbeat_counter = fw_heartbeat_counter; 3336 vha->fw_heartbeat_counter = fw_heartbeat_counter;
3321} 3337}
3322 3338
@@ -3418,13 +3434,15 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3418 "%s(): Adapter reset needed!\n", __func__); 3434 "%s(): Adapter reset needed!\n", __func__);
3419 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3435 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3420 qla2xxx_wake_dpc(vha); 3436 qla2xxx_wake_dpc(vha);
3437 ha->flags.fw_hung = 1;
3421 if (ha->flags.mbox_busy) { 3438 if (ha->flags.mbox_busy) {
3422 ha->flags.fw_hung = 1;
3423 ha->flags.mbox_int = 1; 3439 ha->flags.mbox_int = 1;
3424 DEBUG2(qla_printk(KERN_ERR, ha, 3440 DEBUG2(qla_printk(KERN_ERR, ha,
3425 "Need reset, doing premature " 3441 "Need reset, doing premature "
3426 "completion of mbx command\n")); 3442 "completion of mbx command\n"));
3427 complete(&ha->mbx_intr_comp); 3443 if (test_bit(MBX_INTR_WAIT,
3444 &ha->mbx_cmd_flags))
3445 complete(&ha->mbx_intr_comp);
3428 } 3446 }
3429 } else { 3447 } else {
3430 qla82xx_check_fw_alive(vha); 3448 qla82xx_check_fw_alive(vha);
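
Note: the first qla_nx.c hunk re-reads the hardware consumer index and recomputes request-ring free space before allocating a command context, instead of trusting a possibly stale req->cnt. The arithmetic is ordinary circular-buffer accounting; a self-contained version of the same calculation (the names are mine, not the driver's):

        #include <assert.h>
        #include <stdint.h>

        /* Free entries in a circular ring, given producer and consumer indices. */
        static uint16_t ring_free(uint16_t length, uint16_t prod, uint16_t cons)
        {
                if (prod < cons)
                        return cons - prod;
                return length - (prod - cons);
        }

        int main(void)
        {
                assert(ring_free(128, 10, 30) == 20);   /* consumer ahead of producer */
                assert(ring_free(128, 30, 10) == 108);  /* producer has wrapped past consumer */
                return 0;
        }
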
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8c80b49ac1c4..1e4bff695254 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2341,16 +2341,28 @@ probe_out:
2341static void 2341static void
2342qla2x00_remove_one(struct pci_dev *pdev) 2342qla2x00_remove_one(struct pci_dev *pdev)
2343{ 2343{
2344 scsi_qla_host_t *base_vha, *vha, *temp; 2344 scsi_qla_host_t *base_vha, *vha;
2345 struct qla_hw_data *ha; 2345 struct qla_hw_data *ha;
2346 unsigned long flags;
2346 2347
2347 base_vha = pci_get_drvdata(pdev); 2348 base_vha = pci_get_drvdata(pdev);
2348 ha = base_vha->hw; 2349 ha = base_vha->hw;
2349 2350
2350 list_for_each_entry_safe(vha, temp, &ha->vp_list, list) { 2351 spin_lock_irqsave(&ha->vport_slock, flags);
2351 if (vha && vha->fc_vport) 2352 list_for_each_entry(vha, &ha->vp_list, list) {
2353 atomic_inc(&vha->vref_count);
2354
2355 if (vha && vha->fc_vport) {
2356 spin_unlock_irqrestore(&ha->vport_slock, flags);
2357
2352 fc_vport_terminate(vha->fc_vport); 2358 fc_vport_terminate(vha->fc_vport);
2359
2360 spin_lock_irqsave(&ha->vport_slock, flags);
2361 }
2362
2363 atomic_dec(&vha->vref_count);
2353 } 2364 }
2365 spin_unlock_irqrestore(&ha->vport_slock, flags);
2354 2366
2355 set_bit(UNLOADING, &base_vha->dpc_flags); 2367 set_bit(UNLOADING, &base_vha->dpc_flags);
2356 2368
@@ -2975,10 +2987,17 @@ static struct qla_work_evt *
2975qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) 2987qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
2976{ 2988{
2977 struct qla_work_evt *e; 2989 struct qla_work_evt *e;
2990 uint8_t bail;
2991
2992 QLA_VHA_MARK_BUSY(vha, bail);
2993 if (bail)
2994 return NULL;
2978 2995
2979 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); 2996 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
2980 if (!e) 2997 if (!e) {
2998 QLA_VHA_MARK_NOT_BUSY(vha);
2981 return NULL; 2999 return NULL;
3000 }
2982 3001
2983 INIT_LIST_HEAD(&e->list); 3002 INIT_LIST_HEAD(&e->list);
2984 e->type = type; 3003 e->type = type;
@@ -3135,6 +3154,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
3135 } 3154 }
3136 if (e->flags & QLA_EVT_FLAG_FREE) 3155 if (e->flags & QLA_EVT_FLAG_FREE)
3137 kfree(e); 3156 kfree(e);
3157
3158 /* For each work completed decrement vha ref count */
3159 QLA_VHA_MARK_NOT_BUSY(vha);
3138 } 3160 }
3139} 3161}
3140 3162
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e75ccb91317d..8edbccb3232d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.03-k0" 10#define QLA2XXX_VERSION "8.03.04-k0"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
14#define QLA_DRIVER_PATCH_VER 3 14#define QLA_DRIVER_PATCH_VER 4
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ade720422c6..ee02d3838a0a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1011,8 +1011,8 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1011 1011
1012err_exit: 1012err_exit:
1013 scsi_release_buffers(cmd); 1013 scsi_release_buffers(cmd);
1014 scsi_put_command(cmd);
1015 cmd->request->special = NULL; 1014 cmd->request->special = NULL;
1015 scsi_put_command(cmd);
1016 return error; 1016 return error;
1017} 1017}
1018EXPORT_SYMBOL(scsi_init_io); 1018EXPORT_SYMBOL(scsi_init_io);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 63bd01ae534f..20514c47a5aa 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -870,7 +870,7 @@ static int sd_release(struct gendisk *disk, fmode_t mode)
870 870
871 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); 871 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
872 872
873 if (atomic_dec_return(&sdkp->openers) && sdev->removable) { 873 if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
874 if (scsi_block_when_processing_errors(sdev)) 874 if (scsi_block_when_processing_errors(sdev))
875 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); 875 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
876 } 876 }
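
Note: the sd_release() fix adds the missing "== 0": atomic_dec_return() yields the value after the decrement, so the old test was true for every close except the last one, and removal was re-allowed on the wrong closes. A stand-alone illustration of the semantics, using C11 atomics in place of the kernel's atomic_t (fetch_sub returns the prior value, so subtracting 1 gives the post-decrement value):

        #include <assert.h>
        #include <stdatomic.h>

        int main(void)
        {
                atomic_int openers = 2;

                /* first close: counter drops 2 -> 1, not the last user yet */
                assert(atomic_fetch_sub(&openers, 1) - 1 == 1);

                /* last close: counter drops 1 -> 0, only now allow medium removal */
                assert(atomic_fetch_sub(&openers, 1) - 1 == 0);
                return 0;
        }
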
@@ -2623,15 +2623,15 @@ module_exit(exit_sd);
2623static void sd_print_sense_hdr(struct scsi_disk *sdkp, 2623static void sd_print_sense_hdr(struct scsi_disk *sdkp,
2624 struct scsi_sense_hdr *sshdr) 2624 struct scsi_sense_hdr *sshdr)
2625{ 2625{
2626 sd_printk(KERN_INFO, sdkp, ""); 2626 sd_printk(KERN_INFO, sdkp, " ");
2627 scsi_show_sense_hdr(sshdr); 2627 scsi_show_sense_hdr(sshdr);
2628 sd_printk(KERN_INFO, sdkp, ""); 2628 sd_printk(KERN_INFO, sdkp, " ");
2629 scsi_show_extd_sense(sshdr->asc, sshdr->ascq); 2629 scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
2630} 2630}
2631 2631
2632static void sd_print_result(struct scsi_disk *sdkp, int result) 2632static void sd_print_result(struct scsi_disk *sdkp, int result)
2633{ 2633{
2634 sd_printk(KERN_INFO, sdkp, ""); 2634 sd_printk(KERN_INFO, sdkp, " ");
2635 scsi_show_result(result); 2635 scsi_show_result(result);
2636} 2636}
2637 2637
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index a7bc8b7b09ac..2c3e89ddf069 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -72,10 +72,7 @@ static void sym_printl_hex(u_char *p, int n)
72 72
73static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) 73static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
74{ 74{
75 if (label) 75 sym_print_addr(cp->cmd, "%s: ", label);
76 sym_print_addr(cp->cmd, "%s: ", label);
77 else
78 sym_print_addr(cp->cmd, "");
79 76
80 spi_print_msg(msg); 77 spi_print_msg(msg);
81 printf("\n"); 78 printf("\n");
@@ -4558,7 +4555,8 @@ static void sym_int_sir(struct sym_hcb *np)
4558 switch (np->msgin [2]) { 4555 switch (np->msgin [2]) {
4559 case M_X_MODIFY_DP: 4556 case M_X_MODIFY_DP:
4560 if (DEBUG_FLAGS & DEBUG_POINTER) 4557 if (DEBUG_FLAGS & DEBUG_POINTER)
4561 sym_print_msg(cp, NULL, np->msgin); 4558 sym_print_msg(cp, "extended msg ",
4559 np->msgin);
4562 tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + 4560 tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
4563 (np->msgin[5]<<8) + (np->msgin[6]); 4561 (np->msgin[5]<<8) + (np->msgin[6]);
4564 sym_modify_dp(np, tp, cp, tmp); 4562 sym_modify_dp(np, tp, cp, tmp);
@@ -4585,7 +4583,7 @@ static void sym_int_sir(struct sym_hcb *np)
4585 */ 4583 */
4586 case M_IGN_RESIDUE: 4584 case M_IGN_RESIDUE:
4587 if (DEBUG_FLAGS & DEBUG_POINTER) 4585 if (DEBUG_FLAGS & DEBUG_POINTER)
4588 sym_print_msg(cp, NULL, np->msgin); 4586 sym_print_msg(cp, "1 or 2 byte ", np->msgin);
4589 if (cp->host_flags & HF_SENSE) 4587 if (cp->host_flags & HF_SENSE)
4590 OUTL_DSP(np, SCRIPTA_BA(np, clrack)); 4588 OUTL_DSP(np, SCRIPTA_BA(np, clrack));
4591 else 4589 else
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 7356a56ac458..be0ebce36e54 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -869,7 +869,9 @@ static int get_serial_info(struct m68k_serial * info,
869 tmp.close_delay = info->close_delay; 869 tmp.close_delay = info->close_delay;
870 tmp.closing_wait = info->closing_wait; 870 tmp.closing_wait = info->closing_wait;
871 tmp.custom_divisor = info->custom_divisor; 871 tmp.custom_divisor = info->custom_divisor;
872 copy_to_user(retinfo,&tmp,sizeof(*retinfo)); 872 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
873 return -EFAULT;
874
873 return 0; 875 return 0;
874} 876}
875 877
@@ -882,7 +884,8 @@ static int set_serial_info(struct m68k_serial * info,
882 884
883 if (!new_info) 885 if (!new_info)
884 return -EFAULT; 886 return -EFAULT;
885 copy_from_user(&new_serial,new_info,sizeof(new_serial)); 887 if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
888 return -EFAULT;
886 old_info = *info; 889 old_info = *info;
887 890
888 if (!capable(CAP_SYS_ADMIN)) { 891 if (!capable(CAP_SYS_ADMIN)) {
@@ -943,8 +946,7 @@ static int get_lsr_info(struct m68k_serial * info, unsigned int *value)
943 status = 0; 946 status = 0;
944#endif 947#endif
945 local_irq_restore(flags); 948 local_irq_restore(flags);
946 put_user(status,value); 949 return put_user(status, value);
947 return 0;
948} 950}
949 951
950/* 952/*
@@ -999,27 +1001,18 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
999 send_break(info, arg ? arg*(100) : 250); 1001 send_break(info, arg ? arg*(100) : 250);
1000 return 0; 1002 return 0;
1001 case TIOCGSERIAL: 1003 case TIOCGSERIAL:
1002 if (access_ok(VERIFY_WRITE, (void *) arg, 1004 return get_serial_info(info,
1003 sizeof(struct serial_struct))) 1005 (struct serial_struct *) arg);
1004 return get_serial_info(info,
1005 (struct serial_struct *) arg);
1006 return -EFAULT;
1007 case TIOCSSERIAL: 1006 case TIOCSSERIAL:
1008 return set_serial_info(info, 1007 return set_serial_info(info,
1009 (struct serial_struct *) arg); 1008 (struct serial_struct *) arg);
1010 case TIOCSERGETLSR: /* Get line status register */ 1009 case TIOCSERGETLSR: /* Get line status register */
1011 if (access_ok(VERIFY_WRITE, (void *) arg, 1010 return get_lsr_info(info, (unsigned int *) arg);
1012 sizeof(unsigned int)))
1013 return get_lsr_info(info, (unsigned int *) arg);
1014 return -EFAULT;
1015 case TIOCSERGSTRUCT: 1011 case TIOCSERGSTRUCT:
1016 if (!access_ok(VERIFY_WRITE, (void *) arg, 1012 if (copy_to_user((struct m68k_serial *) arg,
1017 sizeof(struct m68k_serial))) 1013 info, sizeof(struct m68k_serial)))
1018 return -EFAULT; 1014 return -EFAULT;
1019 copy_to_user((struct m68k_serial *) arg,
1020 info, sizeof(struct m68k_serial));
1021 return 0; 1015 return 0;
1022
1023 default: 1016 default:
1024 return -ENOIOCTLCMD; 1017 return -ENOIOCTLCMD;
1025 } 1018 }
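
Note: the 68328serial changes all follow one rule: copy_to_user(), copy_from_user() and put_user() report failure through their return value, and ignoring it either swallows a fault or returns success with a half-filled buffer. The shape the ioctl helpers converge on, sketched with a hypothetical my_info structure:

        #include <linux/uaccess.h>

        struct my_info { int line; int flags; };        /* placeholder state */

        static int my_get_info(struct my_info *info, struct my_info __user *retinfo)
        {
                struct my_info tmp = *info;     /* snapshot kernel-side state */

                /* propagate -EFAULT instead of silently dropping the error */
                if (copy_to_user(retinfo, &tmp, sizeof(tmp)))
                        return -EFAULT;
                return 0;
        }
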
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c
index b745792ec25a..eaafb98debed 100644
--- a/drivers/serial/8250_early.c
+++ b/drivers/serial/8250_early.c
@@ -203,13 +203,13 @@ static int __init parse_options(struct early_serial8250_device *device,
203 203
204 if (mmio || mmio32) 204 if (mmio || mmio32)
205 printk(KERN_INFO 205 printk(KERN_INFO
206 "Early serial console at MMIO%s 0x%llu (options '%s')\n", 206 "Early serial console at MMIO%s 0x%llx (options '%s')\n",
207 mmio32 ? "32" : "", 207 mmio32 ? "32" : "",
208 (unsigned long long)port->mapbase, 208 (unsigned long long)port->mapbase,
209 device->options); 209 device->options);
210 else 210 else
211 printk(KERN_INFO 211 printk(KERN_INFO
212 "Early serial console at I/O port 0x%lu (options '%s')\n", 212 "Early serial console at I/O port 0x%lx (options '%s')\n",
213 port->iobase, 213 port->iobase,
214 device->options); 214 device->options);
215 215
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 50441ffe8e38..2904aa044126 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -472,14 +472,9 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
472 spin_unlock_irqrestore(&uap->port.lock, flags); 472 spin_unlock_irqrestore(&uap->port.lock, flags);
473} 473}
474 474
475static void pl010_set_ldisc(struct uart_port *port) 475static void pl010_set_ldisc(struct uart_port *port, int new)
476{ 476{
477 int line = port->line; 477 if (new == N_PPS) {
478
479 if (line >= port->state->port.tty->driver->num)
480 return;
481
482 if (port->state->port.tty->ldisc->ops->num == N_PPS) {
483 port->flags |= UPF_HARDPPS_CD; 478 port->flags |= UPF_HARDPPS_CD;
484 pl010_enable_ms(port); 479 pl010_enable_ms(port);
485 } else 480 } else
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index e57fb3d228e2..5318dd3774ae 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -121,7 +121,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
121 unsigned int sclk = get_sclk(); 121 unsigned int sclk = get_sclk();
122 122
123 /* Set TCR1 and TCR2, TFSR is not enabled for uart */ 123 /* Set TCR1 and TCR2, TFSR is not enabled for uart */
124 SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK)); 124 SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
125 SPORT_PUT_TCR2(up, size + 1); 125 SPORT_PUT_TCR2(up, size + 1);
126 pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); 126 pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
127 127
diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c
index bc9af503907f..5dff45c76d32 100644
--- a/drivers/serial/mfd.c
+++ b/drivers/serial/mfd.c
@@ -27,6 +27,7 @@
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/console.h> 28#include <linux/console.h>
29#include <linux/sysrq.h> 29#include <linux/sysrq.h>
30#include <linux/slab.h>
30#include <linux/serial_reg.h> 31#include <linux/serial_reg.h>
31#include <linux/circ_buf.h> 32#include <linux/circ_buf.h>
32#include <linux/delay.h> 33#include <linux/delay.h>
@@ -1423,7 +1424,6 @@ static void hsu_global_init(void)
1423 } 1424 }
1424 1425
1425 phsu = hsu; 1426 phsu = hsu;
1426
1427 hsu_debugfs_init(hsu); 1427 hsu_debugfs_init(hsu);
1428 return; 1428 return;
1429 1429
@@ -1435,18 +1435,20 @@ err_free_region:
1435 1435
1436static void serial_hsu_remove(struct pci_dev *pdev) 1436static void serial_hsu_remove(struct pci_dev *pdev)
1437{ 1437{
1438 struct hsu_port *hsu; 1438 void *priv = pci_get_drvdata(pdev);
1439 int i; 1439 struct uart_hsu_port *up;
1440 1440
1441 hsu = pci_get_drvdata(pdev); 1441 if (!priv)
1442 if (!hsu)
1443 return; 1442 return;
1444 1443
1445 for (i = 0; i < 3; i++) 1444 /* For port 0/1/2, priv is the address of uart_hsu_port */
1446 uart_remove_one_port(&serial_hsu_reg, &hsu->port[i].port); 1445 if (pdev->device != 0x081E) {
1446 up = priv;
1447 uart_remove_one_port(&serial_hsu_reg, &up->port);
1448 }
1447 1449
1448 pci_set_drvdata(pdev, NULL); 1450 pci_set_drvdata(pdev, NULL);
1449 free_irq(hsu->irq, hsu); 1451 free_irq(pdev->irq, priv);
1450 pci_disable_device(pdev); 1452 pci_disable_device(pdev);
1451} 1453}
1452 1454
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 8dedb266f143..c4399e23565a 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -500,6 +500,7 @@ static int __init mpc512x_psc_fifoc_init(void)
500 psc_fifoc = of_iomap(np, 0); 500 psc_fifoc = of_iomap(np, 0);
501 if (!psc_fifoc) { 501 if (!psc_fifoc) {
502 pr_err("%s: Can't map FIFOC\n", __func__); 502 pr_err("%s: Can't map FIFOC\n", __func__);
503 of_node_put(np);
503 return -ENODEV; 504 return -ENODEV;
504 } 505 }
505 506
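
Note: the mpc52xx_uart fix adds the of_node_put() that the error path was missing: of_find_compatible_node() hands back a device_node with its reference count raised, and every exit path has to drop it exactly once. One balanced way to structure such a lookup, sketched with a made-up "my,compatible" string (the iomap stays valid after the node reference is dropped):

        #include <linux/of.h>
        #include <linux/of_address.h>

        static void __iomem *my_map_regs(void)
        {
                struct device_node *np;
                void __iomem *regs;

                np = of_find_compatible_node(NULL, NULL, "my,compatible");
                if (!np)
                        return NULL;

                regs = of_iomap(np, 0);
                of_node_put(np);        /* drop the reference on success and failure alike */
                return regs;            /* NULL here means the mapping failed */
        }
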
diff --git a/drivers/serial/mrst_max3110.c b/drivers/serial/mrst_max3110.c
index f6ad1ecbff79..51c15f58e01e 100644
--- a/drivers/serial/mrst_max3110.c
+++ b/drivers/serial/mrst_max3110.c
@@ -29,6 +29,7 @@
29 29
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/ioport.h> 31#include <linux/ioport.h>
32#include <linux/irq.h>
32#include <linux/init.h> 33#include <linux/init.h>
33#include <linux/console.h> 34#include <linux/console.h>
34#include <linux/sysrq.h> 35#include <linux/sysrq.h>
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 141c69554bd4..7d475b2a79e8 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -335,8 +335,6 @@ static int serial_probe(struct pcmcia_device *link)
335 info->p_dev = link; 335 info->p_dev = link;
336 link->priv = info; 336 link->priv = info;
337 337
338 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
339 link->resource[0]->end = 8;
340 link->conf.Attributes = CONF_ENABLE_IRQ; 338 link->conf.Attributes = CONF_ENABLE_IRQ;
341 if (do_sound) { 339 if (do_sound) {
342 link->conf.Attributes |= CONF_ENABLE_SPKR; 340 link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -411,6 +409,27 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
411 409
412/*====================================================================*/ 410/*====================================================================*/
413 411
412static int pfc_config(struct pcmcia_device *p_dev)
413{
414 unsigned int port = 0;
415 struct serial_info *info = p_dev->priv;
416
417 if ((p_dev->resource[1]->end != 0) &&
418 (resource_size(p_dev->resource[1]) == 8)) {
419 port = p_dev->resource[1]->start;
420 info->slave = 1;
421 } else if ((info->manfid == MANFID_OSITECH) &&
422 (resource_size(p_dev->resource[0]) == 0x40)) {
423 port = p_dev->resource[0]->start + 0x28;
424 info->slave = 1;
425 }
426 if (info->slave)
427 return setup_serial(p_dev, info, port, p_dev->irq);
428
429 dev_warn(&p_dev->dev, "no usable port range found, giving up\n");
430 return -ENODEV;
431}
432
414static int simple_config_check(struct pcmcia_device *p_dev, 433static int simple_config_check(struct pcmcia_device *p_dev,
415 cistpl_cftable_entry_t *cf, 434 cistpl_cftable_entry_t *cf,
416 cistpl_cftable_entry_t *dflt, 435 cistpl_cftable_entry_t *dflt,
@@ -461,23 +480,8 @@ static int simple_config(struct pcmcia_device *link)
461 struct serial_info *info = link->priv; 480 struct serial_info *info = link->priv;
462 int i = -ENODEV, try; 481 int i = -ENODEV, try;
463 482
464 /* If the card is already configured, look up the port and irq */ 483 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
465 if (link->function_config) { 484 link->resource[0]->end = 8;
466 unsigned int port = 0;
467 if ((link->resource[1]->end != 0) &&
468 (resource_size(link->resource[1]) == 8)) {
469 port = link->resource[1]->end;
470 info->slave = 1;
471 } else if ((info->manfid == MANFID_OSITECH) &&
472 (resource_size(link->resource[0]) == 0x40)) {
473 port = link->resource[0]->start + 0x28;
474 info->slave = 1;
475 }
476 if (info->slave) {
477 return setup_serial(link, info, port,
478 link->irq);
479 }
480 }
481 485
482 /* First pass: look for a config entry that looks normal. 486 /* First pass: look for a config entry that looks normal.
483 * Two tries: without IO aliases, then with aliases */ 487 * Two tries: without IO aliases, then with aliases */
@@ -491,8 +495,7 @@ static int simple_config(struct pcmcia_device *link)
491 if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL)) 495 if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL))
492 goto found_port; 496 goto found_port;
493 497
494 printk(KERN_NOTICE 498 dev_warn(&link->dev, "no usable port range found, giving up\n");
495 "serial_cs: no usable port range found, giving up\n");
496 return -1; 499 return -1;
497 500
498found_port: 501found_port:
@@ -558,6 +561,7 @@ static int multi_config(struct pcmcia_device *link)
558 int i, base2 = 0; 561 int i, base2 = 0;
559 562
560 /* First, look for a generic full-sized window */ 563 /* First, look for a generic full-sized window */
564 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
561 link->resource[0]->end = info->multi * 8; 565 link->resource[0]->end = info->multi * 8;
562 if (pcmcia_loop_config(link, multi_config_check, &base2)) { 566 if (pcmcia_loop_config(link, multi_config_check, &base2)) {
563 /* If that didn't work, look for two windows */ 567 /* If that didn't work, look for two windows */
@@ -565,15 +569,14 @@ static int multi_config(struct pcmcia_device *link)
565 info->multi = 2; 569 info->multi = 2;
566 if (pcmcia_loop_config(link, multi_config_check_notpicky, 570 if (pcmcia_loop_config(link, multi_config_check_notpicky,
567 &base2)) { 571 &base2)) {
568 printk(KERN_NOTICE "serial_cs: no usable port range" 572 dev_warn(&link->dev, "no usable port range "
569 "found, giving up\n"); 573 "found, giving up\n");
570 return -ENODEV; 574 return -ENODEV;
571 } 575 }
572 } 576 }
573 577
574 if (!link->irq) 578 if (!link->irq)
575 dev_warn(&link->dev, 579 dev_warn(&link->dev, "no usable IRQ found, continuing...\n");
576 "serial_cs: no usable IRQ found, continuing...\n");
577 580
578 /* 581 /*
579 * Apply any configuration quirks. 582 * Apply any configuration quirks.
@@ -675,6 +678,7 @@ static int serial_config(struct pcmcia_device * link)
675 multifunction cards that ask for appropriate IO port ranges */ 678 multifunction cards that ask for appropriate IO port ranges */
676 if ((info->multi == 0) && 679 if ((info->multi == 0) &&
677 (link->has_func_id) && 680 (link->has_func_id) &&
681 (link->socket->pcmcia_pfc == 0) &&
678 ((link->func_id == CISTPL_FUNCID_MULTI) || 682 ((link->func_id == CISTPL_FUNCID_MULTI) ||
679 (link->func_id == CISTPL_FUNCID_SERIAL))) 683 (link->func_id == CISTPL_FUNCID_SERIAL)))
680 pcmcia_loop_config(link, serial_check_for_multi, info); 684 pcmcia_loop_config(link, serial_check_for_multi, info);
@@ -685,7 +689,13 @@ static int serial_config(struct pcmcia_device * link)
685 if (info->quirk && info->quirk->multi != -1) 689 if (info->quirk && info->quirk->multi != -1)
686 info->multi = info->quirk->multi; 690 info->multi = info->quirk->multi;
687 691
688 if (info->multi > 1) 692 dev_info(&link->dev,
693 "trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n",
694 link->manf_id, link->card_id,
695 link->socket->pcmcia_pfc, info->multi, info->quirk);
696 if (link->socket->pcmcia_pfc)
697 i = pfc_config(link);
698 else if (info->multi > 1)
689 i = multi_config(link); 699 i = multi_config(link);
690 else 700 else
691 i = simple_config(link); 701 i = simple_config(link);
@@ -704,7 +714,7 @@ static int serial_config(struct pcmcia_device * link)
704 return 0; 714 return 0;
705 715
706failed: 716failed:
707 dev_warn(&link->dev, "serial_cs: failed to initialize\n"); 717 dev_warn(&link->dev, "failed to initialize\n");
708 serial_remove(link); 718 serial_remove(link);
709 return -ENODEV; 719 return -ENODEV;
710} 720}
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 7e5e5efea4e2..cff9a306660f 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -492,7 +492,7 @@ sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
492 sysrq_requested = 0; 492 sysrq_requested = 0;
493 if (ch && time_before(jiffies, sysrq_timeout)) { 493 if (ch && time_before(jiffies, sysrq_timeout)) {
494 spin_unlock_irqrestore(&port->sc_port.lock, flags); 494 spin_unlock_irqrestore(&port->sc_port.lock, flags);
495 handle_sysrq(ch, NULL); 495 handle_sysrq(ch);
496 spin_lock_irqsave(&port->sc_port.lock, flags); 496 spin_lock_irqsave(&port->sc_port.lock, flags);
497 /* ignore actual sysrq command char */ 497 /* ignore actual sysrq command char */
498 continue; 498 continue;
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index acd35d1ebd12..4c37c4e28647 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -503,8 +503,9 @@ static void giveback(struct pl022 *pl022)
503 msg->state = NULL; 503 msg->state = NULL;
504 if (msg->complete) 504 if (msg->complete)
505 msg->complete(msg->context); 505 msg->complete(msg->context);
506 /* This message is completed, so let's turn off the clock! */ 506 /* This message is completed, so let's turn off the clocks! */
507 clk_disable(pl022->clk); 507 clk_disable(pl022->clk);
508 amba_pclk_disable(pl022->adev);
508} 509}
509 510
510/** 511/**
@@ -1139,9 +1140,10 @@ static void pump_messages(struct work_struct *work)
1139 /* Setup the SPI using the per chip configuration */ 1140 /* Setup the SPI using the per chip configuration */
1140 pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); 1141 pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
1141 /* 1142 /*
1142 * We enable the clock here, then the clock will be disabled when 1143 * We enable the clocks here, then the clocks will be disabled when
1143 * giveback() is called in each method (poll/interrupt/DMA) 1144 * giveback() is called in each method (poll/interrupt/DMA)
1144 */ 1145 */
1146 amba_pclk_enable(pl022->adev);
1145 clk_enable(pl022->clk); 1147 clk_enable(pl022->clk);
1146 restore_state(pl022); 1148 restore_state(pl022);
1147 flush(pl022); 1149 flush(pl022);
@@ -1786,11 +1788,9 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
1786 } 1788 }
1787 1789
1788 /* Disable SSP */ 1790 /* Disable SSP */
1789 clk_enable(pl022->clk);
1790 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), 1791 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
1791 SSP_CR1(pl022->virtbase)); 1792 SSP_CR1(pl022->virtbase));
1792 load_ssp_default_config(pl022); 1793 load_ssp_default_config(pl022);
1793 clk_disable(pl022->clk);
1794 1794
1795 status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", 1795 status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
1796 pl022); 1796 pl022);
@@ -1818,6 +1818,8 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
1818 goto err_spi_register; 1818 goto err_spi_register;
1819 } 1819 }
1820 dev_dbg(dev, "probe succeded\n"); 1820 dev_dbg(dev, "probe succeded\n");
1821 /* Disable the silicon block pclk and clock it when needed */
1822 amba_pclk_disable(adev);
1821 return 0; 1823 return 0;
1822 1824
1823 err_spi_register: 1825 err_spi_register:
@@ -1879,9 +1881,9 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state)
1879 return status; 1881 return status;
1880 } 1882 }
1881 1883
1882 clk_enable(pl022->clk); 1884 amba_pclk_enable(adev);
1883 load_ssp_default_config(pl022); 1885 load_ssp_default_config(pl022);
1884 clk_disable(pl022->clk); 1886 amba_pclk_disable(adev);
1885 dev_dbg(&adev->dev, "suspended\n"); 1887 dev_dbg(&adev->dev, "suspended\n");
1886 return 0; 1888 return 0;
1887} 1889}
@@ -1981,7 +1983,7 @@ static int __init pl022_init(void)
1981 return amba_driver_register(&pl022_driver); 1983 return amba_driver_register(&pl022_driver);
1982} 1984}
1983 1985
1984module_init(pl022_init); 1986subsys_initcall(pl022_init);
1985 1987
1986static void __exit pl022_exit(void) 1988static void __exit pl022_exit(void)
1987{ 1989{
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index d256cb00604c..56247853c298 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -181,10 +181,6 @@ static void flush(struct dw_spi *dws)
181 wait_till_not_busy(dws); 181 wait_till_not_busy(dws);
182} 182}
183 183
184static void null_cs_control(u32 command)
185{
186}
187
188static int null_writer(struct dw_spi *dws) 184static int null_writer(struct dw_spi *dws)
189{ 185{
190 u8 n_bytes = dws->n_bytes; 186 u8 n_bytes = dws->n_bytes;
@@ -322,7 +318,7 @@ static void giveback(struct dw_spi *dws)
322 struct spi_transfer, 318 struct spi_transfer,
323 transfer_list); 319 transfer_list);
324 320
325 if (!last_transfer->cs_change) 321 if (!last_transfer->cs_change && dws->cs_control)
326 dws->cs_control(MRST_SPI_DEASSERT); 322 dws->cs_control(MRST_SPI_DEASSERT);
327 323
328 msg->state = NULL; 324 msg->state = NULL;
@@ -396,6 +392,11 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
396static irqreturn_t dw_spi_irq(int irq, void *dev_id) 392static irqreturn_t dw_spi_irq(int irq, void *dev_id)
397{ 393{
398 struct dw_spi *dws = dev_id; 394 struct dw_spi *dws = dev_id;
395 u16 irq_status, irq_mask = 0x3f;
396
397 irq_status = dw_readw(dws, isr) & irq_mask;
398 if (!irq_status)
399 return IRQ_NONE;
399 400
400 if (!dws->cur_msg) { 401 if (!dws->cur_msg) {
401 spi_mask_intr(dws, SPI_INT_TXEI); 402 spi_mask_intr(dws, SPI_INT_TXEI);
@@ -544,13 +545,13 @@ static void pump_transfers(unsigned long data)
544 */ 545 */
545 if (dws->cs_control) { 546 if (dws->cs_control) {
546 if (dws->rx && dws->tx) 547 if (dws->rx && dws->tx)
547 chip->tmode = 0x00; 548 chip->tmode = SPI_TMOD_TR;
548 else if (dws->rx) 549 else if (dws->rx)
549 chip->tmode = 0x02; 550 chip->tmode = SPI_TMOD_RO;
550 else 551 else
551 chip->tmode = 0x01; 552 chip->tmode = SPI_TMOD_TO;
552 553
553 cr0 &= ~(0x3 << SPI_MODE_OFFSET); 554 cr0 &= ~SPI_TMOD_MASK;
554 cr0 |= (chip->tmode << SPI_TMOD_OFFSET); 555 cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
555 } 556 }
556 557
@@ -699,9 +700,6 @@ static int dw_spi_setup(struct spi_device *spi)
699 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 700 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
700 if (!chip) 701 if (!chip)
701 return -ENOMEM; 702 return -ENOMEM;
702
703 chip->cs_control = null_cs_control;
704 chip->enable_dma = 0;
705 } 703 }
706 704
707 /* 705 /*
@@ -883,7 +881,7 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
883 dws->dma_inited = 0; 881 dws->dma_inited = 0;
884 dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); 882 dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
885 883
886 ret = request_irq(dws->irq, dw_spi_irq, 0, 884 ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
887 "dw_spi", dws); 885 "dw_spi", dws);
888 if (ret < 0) { 886 if (ret < 0) {
889 dev_err(&master->dev, "can not get IRQ\n"); 887 dev_err(&master->dev, "can not get IRQ\n");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a9e5c79ae52a..b5a78a1f4421 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/cache.h> 24#include <linux/cache.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/of_device.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27#include <linux/mod_devicetable.h> 28#include <linux/mod_devicetable.h>
28#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
@@ -86,6 +87,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
86 const struct spi_device *spi = to_spi_device(dev); 87 const struct spi_device *spi = to_spi_device(dev);
87 const struct spi_driver *sdrv = to_spi_driver(drv); 88 const struct spi_driver *sdrv = to_spi_driver(drv);
88 89
90 /* Attempt an OF style match */
91 if (of_driver_match_device(dev, drv))
92 return 1;
93
89 if (sdrv->id_table) 94 if (sdrv->id_table)
90 return !!spi_match_id(sdrv->id_table, spi); 95 return !!spi_match_id(sdrv->id_table, spi);
91 96
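The spi_match_device hunk above makes the SPI bus try an OF (device tree) match before falling back to the id_table and finally a plain name compare, so slaves described in a device tree can bind without board-specific modalias strings. A sketch of the driver side this enables; the "acme,foo" compatible string and driver name are made up:

/* Sketch: an SPI driver advertising a device-tree match table. */
static const struct of_device_id foo_of_match[] = {
	{ .compatible = "acme,foo" },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct spi_driver foo_driver = {
	.driver = {
		.name		= "foo",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_of_match,	/* consulted by of_driver_match_device() */
	},
	.probe	= foo_probe,			/* bodies omitted in this sketch */
	.remove	= __devexit_p(foo_remove),
};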
@@ -554,11 +559,9 @@ done:
554EXPORT_SYMBOL_GPL(spi_register_master); 559EXPORT_SYMBOL_GPL(spi_register_master);
555 560
556 561
557static int __unregister(struct device *dev, void *master_dev) 562static int __unregister(struct device *dev, void *null)
558{ 563{
559 /* note: before about 2.6.14-rc1 this would corrupt memory: */ 564 spi_unregister_device(to_spi_device(dev));
560 if (dev != master_dev)
561 spi_unregister_device(to_spi_device(dev));
562 return 0; 565 return 0;
563} 566}
564 567
@@ -576,8 +579,7 @@ void spi_unregister_master(struct spi_master *master)
576{ 579{
577 int dummy; 580 int dummy;
578 581
579 dummy = device_for_each_child(master->dev.parent, &master->dev, 582 dummy = device_for_each_child(&master->dev, NULL, __unregister);
580 __unregister);
581 device_unregister(&master->dev); 583 device_unregister(&master->dev);
582} 584}
583EXPORT_SYMBOL_GPL(spi_unregister_master); 585EXPORT_SYMBOL_GPL(spi_unregister_master);
diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c
index e24a63498acb..63e51b011d50 100644
--- a/drivers/spi/spi_gpio.c
+++ b/drivers/spi/spi_gpio.c
@@ -350,7 +350,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev)
350 spi_gpio->bitbang.master = spi_master_get(master); 350 spi_gpio->bitbang.master = spi_master_get(master);
351 spi_gpio->bitbang.chipselect = spi_gpio_chipselect; 351 spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
352 352
353 if ((master_flags & (SPI_MASTER_NO_RX | SPI_MASTER_NO_RX)) == 0) { 353 if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
354 spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; 354 spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
355 spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; 355 spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
356 spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; 356 spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index d31b57f7baaf..1dd86b835cd8 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -408,11 +408,17 @@ static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
408 408
409 xfer_ofs = mspi->xfer_in_progress->len - mspi->count; 409 xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
410 410
411 out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); 411 if (mspi->rx_dma == mspi->dma_dummy_rx)
412 out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
413 else
414 out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
412 out_be16(&rx_bd->cbd_datlen, 0); 415 out_be16(&rx_bd->cbd_datlen, 0);
413 out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); 416 out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
414 417
415 out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); 418 if (mspi->tx_dma == mspi->dma_dummy_tx)
419 out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
420 else
421 out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
416 out_be16(&tx_bd->cbd_datlen, xfer_len); 422 out_be16(&tx_bd->cbd_datlen, xfer_len);
417 out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | 423 out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
418 BD_SC_LAST); 424 BD_SC_LAST);
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
index 97365815a729..c3038da2648a 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi_s3c64xx.c
@@ -200,6 +200,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
200 val = readl(regs + S3C64XX_SPI_STATUS); 200 val = readl(regs + S3C64XX_SPI_STATUS);
201 } while (TX_FIFO_LVL(val, sci) && loops--); 201 } while (TX_FIFO_LVL(val, sci) && loops--);
202 202
203 if (loops == 0)
204 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
205
203 /* Flush RxFIFO*/ 206 /* Flush RxFIFO*/
204 loops = msecs_to_loops(1); 207 loops = msecs_to_loops(1);
205 do { 208 do {
@@ -210,6 +213,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
210 break; 213 break;
211 } while (loops--); 214 } while (loops--);
212 215
216 if (loops == 0)
217 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
218
213 val = readl(regs + S3C64XX_SPI_CH_CFG); 219 val = readl(regs + S3C64XX_SPI_CH_CFG);
214 val &= ~S3C64XX_SPI_CH_SW_RST; 220 val &= ~S3C64XX_SPI_CH_SW_RST;
215 writel(val, regs + S3C64XX_SPI_CH_CFG); 221 writel(val, regs + S3C64XX_SPI_CH_CFG);
@@ -320,16 +326,17 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
320 326
321 /* millisecs to xfer 'len' bytes @ 'cur_speed' */ 327 /* millisecs to xfer 'len' bytes @ 'cur_speed' */
322 ms = xfer->len * 8 * 1000 / sdd->cur_speed; 328 ms = xfer->len * 8 * 1000 / sdd->cur_speed;
323 ms += 5; /* some tolerance */ 329 ms += 10; /* some tolerance */
324 330
325 if (dma_mode) { 331 if (dma_mode) {
326 val = msecs_to_jiffies(ms) + 10; 332 val = msecs_to_jiffies(ms) + 10;
327 val = wait_for_completion_timeout(&sdd->xfer_completion, val); 333 val = wait_for_completion_timeout(&sdd->xfer_completion, val);
328 } else { 334 } else {
335 u32 status;
329 val = msecs_to_loops(ms); 336 val = msecs_to_loops(ms);
330 do { 337 do {
331 val = readl(regs + S3C64XX_SPI_STATUS); 338 status = readl(regs + S3C64XX_SPI_STATUS);
332 } while (RX_FIFO_LVL(val, sci) < xfer->len && --val); 339 } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
333 } 340 }
334 341
335 if (!val) 342 if (!val)
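In the wait_for_xfer hunk above, the polled-IO path reused "val" both as the timeout countdown and as the destination of readl(), so the counter was overwritten on the first pass and the "!val" timeout test afterwards was meaningless; the fix reads the status into its own variable. A small sketch of the corrected shape; the register offset and level macro are assumptions:

#define STATUS_REG	0x14			/* made-up offset */
#define RX_LVL(v)	((v) & 0x1ff)		/* made-up level field */

/* Sketch: keep the timeout counter and the polled status separate. */
static int wait_rx_level(void __iomem *regs, u32 want, unsigned long loops)
{
	u32 status;

	do {
		status = readl(regs + STATUS_REG);
	} while (RX_LVL(status) < want && --loops);

	return loops ? 0 : -ETIMEDOUT;		/* loops == 0 means we gave up */
}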
@@ -447,8 +454,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
447 writel(val, regs + S3C64XX_SPI_CLK_CFG); 454 writel(val, regs + S3C64XX_SPI_CLK_CFG);
448} 455}
449 456
450void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, 457static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
451 int size, enum s3c2410_dma_buffresult res) 458 int size, enum s3c2410_dma_buffresult res)
452{ 459{
453 struct s3c64xx_spi_driver_data *sdd = buf_id; 460 struct s3c64xx_spi_driver_data *sdd = buf_id;
454 unsigned long flags; 461 unsigned long flags;
@@ -467,8 +474,8 @@ void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
467 spin_unlock_irqrestore(&sdd->lock, flags); 474 spin_unlock_irqrestore(&sdd->lock, flags);
468} 475}
469 476
470void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, 477static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
471 int size, enum s3c2410_dma_buffresult res) 478 int size, enum s3c2410_dma_buffresult res)
472{ 479{
473 struct s3c64xx_spi_driver_data *sdd = buf_id; 480 struct s3c64xx_spi_driver_data *sdd = buf_id;
474 unsigned long flags; 481 unsigned long flags;
@@ -508,8 +515,9 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
508 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 515 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
509 516
510 if (xfer->tx_buf != NULL) { 517 if (xfer->tx_buf != NULL) {
511 xfer->tx_dma = dma_map_single(dev, xfer->tx_buf, 518 xfer->tx_dma = dma_map_single(dev,
512 xfer->len, DMA_TO_DEVICE); 519 (void *)xfer->tx_buf, xfer->len,
520 DMA_TO_DEVICE);
513 if (dma_mapping_error(dev, xfer->tx_dma)) { 521 if (dma_mapping_error(dev, xfer->tx_dma)) {
514 dev_err(dev, "dma_map_single Tx failed\n"); 522 dev_err(dev, "dma_map_single Tx failed\n");
515 xfer->tx_dma = XFER_DMAADDR_INVALID; 523 xfer->tx_dma = XFER_DMAADDR_INVALID;
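The s3c64xx_spi_map_mssg hunk above adds a cast because spi_transfer's tx_buf is a const void * while dma_map_single() takes a plain void *; the cast only satisfies the prototype, the buffer is still only read (DMA_TO_DEVICE). A minimal sketch of mapping a TX buffer that way, reusing the field names from the hunk:

/* Sketch: map a const TX buffer for device-bound DMA. */
static int map_tx(struct device *dev, struct spi_transfer *xfer)
{
	xfer->tx_dma = dma_map_single(dev, (void *)xfer->tx_buf,
				      xfer->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, xfer->tx_dma)) {
		dev_err(dev, "dma_map_single Tx failed\n");
		return -ENOMEM;
	}
	return 0;
}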
@@ -919,6 +927,13 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
919 return -ENODEV; 927 return -ENODEV;
920 } 928 }
921 929
930 sci = pdev->dev.platform_data;
931 if (!sci->src_clk_name) {
932 dev_err(&pdev->dev,
933 "Board init must call s3c64xx_spi_set_info()\n");
934 return -EINVAL;
935 }
936
922 /* Check for availability of necessary resource */ 937 /* Check for availability of necessary resource */
923 938
924 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 939 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -946,8 +961,6 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
946 return -ENOMEM; 961 return -ENOMEM;
947 } 962 }
948 963
949 sci = pdev->dev.platform_data;
950
951 platform_set_drvdata(pdev, master); 964 platform_set_drvdata(pdev, master);
952 965
953 sdd = spi_master_get_devdata(master); 966 sdd = spi_master_get_devdata(master);
@@ -1170,7 +1183,7 @@ static int __init s3c64xx_spi_init(void)
1170{ 1183{
1171 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe); 1184 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
1172} 1185}
1173module_init(s3c64xx_spi_init); 1186subsys_initcall(s3c64xx_spi_init);
1174 1187
1175static void __exit s3c64xx_spi_exit(void) 1188static void __exit s3c64xx_spi_exit(void)
1176{ 1189{
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4a7a7a7f11b6..335311a98fdc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -113,8 +113,6 @@ source "drivers/staging/vme/Kconfig"
113 113
114source "drivers/staging/memrar/Kconfig" 114source "drivers/staging/memrar/Kconfig"
115 115
116source "drivers/staging/sep/Kconfig"
117
118source "drivers/staging/iio/Kconfig" 116source "drivers/staging/iio/Kconfig"
119 117
120source "drivers/staging/zram/Kconfig" 118source "drivers/staging/zram/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index ca5c03eb3ce3..e3f1e1b6095e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_FB_UDL) += udlfb/
38obj-$(CONFIG_HYPERV) += hv/ 38obj-$(CONFIG_HYPERV) += hv/
39obj-$(CONFIG_VME_BUS) += vme/ 39obj-$(CONFIG_VME_BUS) += vme/
40obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/ 40obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/
41obj-$(CONFIG_DX_SEP) += sep/
42obj-$(CONFIG_IIO) += iio/ 41obj-$(CONFIG_IIO) += iio/
43obj-$(CONFIG_ZRAM) += zram/ 42obj-$(CONFIG_ZRAM) += zram/
44obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ 43obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
diff --git a/drivers/staging/batman-adv/bat_sysfs.c b/drivers/staging/batman-adv/bat_sysfs.c
index b4a8d5eb64fa..05ca15a6c9f8 100644
--- a/drivers/staging/batman-adv/bat_sysfs.c
+++ b/drivers/staging/batman-adv/bat_sysfs.c
@@ -267,6 +267,10 @@ static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr,
267 if (atomic_read(&bat_priv->log_level) == log_level_tmp) 267 if (atomic_read(&bat_priv->log_level) == log_level_tmp)
268 return count; 268 return count;
269 269
270 bat_info(net_dev, "Changing log level from: %i to: %li\n",
271 atomic_read(&bat_priv->log_level),
272 log_level_tmp);
273
270 atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp); 274 atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp);
271 return count; 275 return count;
272} 276}
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 92c216a56885..6e973a79aa25 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -30,7 +30,6 @@
30#include "hash.h" 30#include "hash.h"
31 31
32#include <linux/if_arp.h> 32#include <linux/if_arp.h>
33#include <linux/netfilter_bridge.h>
34 33
35#define MIN(x, y) ((x) < (y) ? (x) : (y)) 34#define MIN(x, y) ((x) < (y) ? (x) : (y))
36 35
@@ -129,6 +128,9 @@ static bool hardif_is_iface_up(struct batman_if *batman_if)
129 128
130static void update_mac_addresses(struct batman_if *batman_if) 129static void update_mac_addresses(struct batman_if *batman_if)
131{ 130{
131 if (!batman_if || !batman_if->packet_buff)
132 return;
133
132 addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr); 134 addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
133 135
134 memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig, 136 memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
@@ -194,8 +196,6 @@ static void hardif_activate_interface(struct net_device *net_dev,
194 if (batman_if->if_status != IF_INACTIVE) 196 if (batman_if->if_status != IF_INACTIVE)
195 return; 197 return;
196 198
197 dev_hold(batman_if->net_dev);
198
199 update_mac_addresses(batman_if); 199 update_mac_addresses(batman_if);
200 batman_if->if_status = IF_TO_BE_ACTIVATED; 200 batman_if->if_status = IF_TO_BE_ACTIVATED;
201 201
@@ -222,8 +222,6 @@ static void hardif_deactivate_interface(struct net_device *net_dev,
222 (batman_if->if_status != IF_TO_BE_ACTIVATED)) 222 (batman_if->if_status != IF_TO_BE_ACTIVATED))
223 return; 223 return;
224 224
225 dev_put(batman_if->net_dev);
226
227 batman_if->if_status = IF_INACTIVE; 225 batman_if->if_status = IF_INACTIVE;
228 226
229 bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev); 227 bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev);
@@ -318,11 +316,13 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
318 if (ret != 1) 316 if (ret != 1)
319 goto out; 317 goto out;
320 318
319 dev_hold(net_dev);
320
321 batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC); 321 batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
322 if (!batman_if) { 322 if (!batman_if) {
323 pr_err("Can't add interface (%s): out of memory\n", 323 pr_err("Can't add interface (%s): out of memory\n",
324 net_dev->name); 324 net_dev->name);
325 goto out; 325 goto release_dev;
326 } 326 }
327 327
328 batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC); 328 batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC);
@@ -336,6 +336,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
336 batman_if->if_num = -1; 336 batman_if->if_num = -1;
337 batman_if->net_dev = net_dev; 337 batman_if->net_dev = net_dev;
338 batman_if->if_status = IF_NOT_IN_USE; 338 batman_if->if_status = IF_NOT_IN_USE;
339 batman_if->packet_buff = NULL;
339 INIT_LIST_HEAD(&batman_if->list); 340 INIT_LIST_HEAD(&batman_if->list);
340 341
341 check_known_mac_addr(batman_if->net_dev->dev_addr); 342 check_known_mac_addr(batman_if->net_dev->dev_addr);
@@ -346,6 +347,8 @@ free_dev:
346 kfree(batman_if->dev); 347 kfree(batman_if->dev);
347free_if: 348free_if:
348 kfree(batman_if); 349 kfree(batman_if);
350release_dev:
351 dev_put(net_dev);
349out: 352out:
350 return NULL; 353 return NULL;
351} 354}
@@ -374,6 +377,7 @@ static void hardif_remove_interface(struct batman_if *batman_if)
374 batman_if->if_status = IF_TO_BE_REMOVED; 377 batman_if->if_status = IF_TO_BE_REMOVED;
375 list_del_rcu(&batman_if->list); 378 list_del_rcu(&batman_if->list);
376 sysfs_del_hardif(&batman_if->hardif_obj); 379 sysfs_del_hardif(&batman_if->hardif_obj);
380 dev_put(batman_if->net_dev);
377 call_rcu(&batman_if->rcu, hardif_free_interface); 381 call_rcu(&batman_if->rcu, hardif_free_interface);
378} 382}
379 383
@@ -393,15 +397,13 @@ static int hard_if_event(struct notifier_block *this,
393 /* FIXME: each batman_if will be attached to a softif */ 397 /* FIXME: each batman_if will be attached to a softif */
394 struct bat_priv *bat_priv = netdev_priv(soft_device); 398 struct bat_priv *bat_priv = netdev_priv(soft_device);
395 399
396 if (!batman_if) 400 if (!batman_if && event == NETDEV_REGISTER)
397 batman_if = hardif_add_interface(net_dev); 401 batman_if = hardif_add_interface(net_dev);
398 402
399 if (!batman_if) 403 if (!batman_if)
400 goto out; 404 goto out;
401 405
402 switch (event) { 406 switch (event) {
403 case NETDEV_REGISTER:
404 break;
405 case NETDEV_UP: 407 case NETDEV_UP:
406 hardif_activate_interface(soft_device, bat_priv, batman_if); 408 hardif_activate_interface(soft_device, bat_priv, batman_if);
407 break; 409 break;
@@ -428,11 +430,6 @@ out:
428 return NOTIFY_DONE; 430 return NOTIFY_DONE;
429} 431}
430 432
431static int batman_skb_recv_finish(struct sk_buff *skb)
432{
433 return NF_ACCEPT;
434}
435
436/* receive a packet with the batman ethertype coming on a hard 433/* receive a packet with the batman ethertype coming on a hard
437 * interface */ 434 * interface */
438int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 435int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
@@ -442,8 +439,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
442 struct bat_priv *bat_priv = netdev_priv(soft_device); 439 struct bat_priv *bat_priv = netdev_priv(soft_device);
443 struct batman_packet *batman_packet; 440 struct batman_packet *batman_packet;
444 struct batman_if *batman_if; 441 struct batman_if *batman_if;
445 struct net_device_stats *stats;
446 struct rtnl_link_stats64 temp;
447 int ret; 442 int ret;
448 443
449 skb = skb_share_check(skb, GFP_ATOMIC); 444 skb = skb_share_check(skb, GFP_ATOMIC);
@@ -455,13 +450,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
455 if (atomic_read(&module_state) != MODULE_ACTIVE) 450 if (atomic_read(&module_state) != MODULE_ACTIVE)
456 goto err_free; 451 goto err_free;
457 452
458 /* if netfilter/ebtables wants to block incoming batman
459 * packets then give them a chance to do so here */
460 ret = NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, dev, NULL,
461 batman_skb_recv_finish);
462 if (ret != 1)
463 goto err_out;
464
465 /* packet should hold at least type and version */ 453 /* packet should hold at least type and version */
466 if (unlikely(skb_headlen(skb) < 2)) 454 if (unlikely(skb_headlen(skb) < 2))
467 goto err_free; 455 goto err_free;
@@ -479,12 +467,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
479 if (batman_if->if_status != IF_ACTIVE) 467 if (batman_if->if_status != IF_ACTIVE)
480 goto err_free; 468 goto err_free;
481 469
482 stats = (struct net_device_stats *)dev_get_stats(skb->dev, &temp);
483 if (stats) {
484 stats->rx_packets++;
485 stats->rx_bytes += skb->len;
486 }
487
488 batman_packet = (struct batman_packet *)skb->data; 470 batman_packet = (struct batman_packet *)skb->data;
489 471
490 if (batman_packet->version != COMPAT_VERSION) { 472 if (batman_packet->version != COMPAT_VERSION) {
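The hard-interface.c hunks above move the dev_hold()/dev_put() pair off the activate/deactivate path and tie it to the lifetime of the batman_if object: the reference is taken once when the interface is added, and released on the allocation error path and when the interface is removed, so the net_device cannot vanish while batman_if still points at it. A stripped-down sketch of that ownership pattern (structure names as in the hunk, RCU freeing omitted):

/* Sketch: pin the net_device for as long as our wrapper object lives. */
static struct batman_if *add_interface(struct net_device *net_dev)
{
	struct batman_if *batman_if;

	dev_hold(net_dev);			/* reference owned by batman_if */

	batman_if = kmalloc(sizeof(*batman_if), GFP_ATOMIC);
	if (!batman_if) {
		dev_put(net_dev);		/* error path drops it again */
		return NULL;
	}

	batman_if->net_dev = net_dev;
	return batman_if;
}

static void remove_interface(struct batman_if *batman_if)
{
	dev_put(batman_if->net_dev);		/* wrapper is going away */
	kfree(batman_if);
}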
diff --git a/drivers/staging/batman-adv/icmp_socket.c b/drivers/staging/batman-adv/icmp_socket.c
index fc3d32c12729..3ae7dd2d2d4d 100644
--- a/drivers/staging/batman-adv/icmp_socket.c
+++ b/drivers/staging/batman-adv/icmp_socket.c
@@ -67,6 +67,7 @@ static int bat_socket_open(struct inode *inode, struct file *file)
67 INIT_LIST_HEAD(&socket_client->queue_list); 67 INIT_LIST_HEAD(&socket_client->queue_list);
68 socket_client->queue_len = 0; 68 socket_client->queue_len = 0;
69 socket_client->index = i; 69 socket_client->index = i;
70 socket_client->bat_priv = inode->i_private;
70 spin_lock_init(&socket_client->lock); 71 spin_lock_init(&socket_client->lock);
71 init_waitqueue_head(&socket_client->queue_wait); 72 init_waitqueue_head(&socket_client->queue_wait);
72 73
@@ -151,9 +152,8 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
151static ssize_t bat_socket_write(struct file *file, const char __user *buff, 152static ssize_t bat_socket_write(struct file *file, const char __user *buff,
152 size_t len, loff_t *off) 153 size_t len, loff_t *off)
153{ 154{
154 /* FIXME: each orig_node->batman_if will be attached to a softif */
155 struct bat_priv *bat_priv = netdev_priv(soft_device);
156 struct socket_client *socket_client = file->private_data; 155 struct socket_client *socket_client = file->private_data;
156 struct bat_priv *bat_priv = socket_client->bat_priv;
157 struct icmp_packet_rr icmp_packet; 157 struct icmp_packet_rr icmp_packet;
158 struct orig_node *orig_node; 158 struct orig_node *orig_node;
159 struct batman_if *batman_if; 159 struct batman_if *batman_if;
@@ -168,6 +168,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
168 return -EINVAL; 168 return -EINVAL;
169 } 169 }
170 170
171 if (!bat_priv->primary_if)
172 return -EFAULT;
173
171 if (len >= sizeof(struct icmp_packet_rr)) 174 if (len >= sizeof(struct icmp_packet_rr))
172 packet_len = sizeof(struct icmp_packet_rr); 175 packet_len = sizeof(struct icmp_packet_rr);
173 176
@@ -223,7 +226,8 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
223 if (batman_if->if_status != IF_ACTIVE) 226 if (batman_if->if_status != IF_ACTIVE)
224 goto dst_unreach; 227 goto dst_unreach;
225 228
226 memcpy(icmp_packet.orig, batman_if->net_dev->dev_addr, ETH_ALEN); 229 memcpy(icmp_packet.orig,
230 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
227 231
228 if (packet_len == sizeof(struct icmp_packet_rr)) 232 if (packet_len == sizeof(struct icmp_packet_rr))
229 memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN); 233 memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN);
@@ -271,7 +275,7 @@ int bat_socket_setup(struct bat_priv *bat_priv)
271 goto err; 275 goto err;
272 276
273 d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR, 277 d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
274 bat_priv->debug_dir, NULL, &fops); 278 bat_priv->debug_dir, bat_priv, &fops);
275 if (d) 279 if (d)
276 goto err; 280 goto err;
277 281
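The icmp_socket.c hunks above stop reaching for the global soft_device and instead pass the per-mesh bat_priv through debugfs: debugfs_create_file() stores its data argument in the inode's i_private, the open handler copies it into the per-client state, and the write handler reads it back from there. A minimal sketch of that plumbing with a made-up state structure:

/* Sketch: hand private data to a debugfs file and pick it up on open. */
struct my_state {
	void *priv;
};

static int my_open(struct inode *inode, struct file *file)
{
	struct my_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;

	state->priv = inode->i_private;	/* set by debugfs_create_file() */
	file->private_data = state;
	return 0;
}

static const struct file_operations my_fops = {
	.owner	= THIS_MODULE,
	.open	= my_open,
};

/* created with:
 *	debugfs_create_file("socket", S_IWUSR | S_IRUSR, parent_dir,
 *			    priv, &my_fops);
 * where "priv" is what my_open() later finds in inode->i_private. */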
diff --git a/drivers/staging/batman-adv/main.c b/drivers/staging/batman-adv/main.c
index 2686019fe4e1..ef7c20ae7979 100644
--- a/drivers/staging/batman-adv/main.c
+++ b/drivers/staging/batman-adv/main.c
@@ -250,10 +250,13 @@ int choose_orig(void *data, int32_t size)
250int is_my_mac(uint8_t *addr) 250int is_my_mac(uint8_t *addr)
251{ 251{
252 struct batman_if *batman_if; 252 struct batman_if *batman_if;
253
253 rcu_read_lock(); 254 rcu_read_lock();
254 list_for_each_entry_rcu(batman_if, &if_list, list) { 255 list_for_each_entry_rcu(batman_if, &if_list, list) {
255 if ((batman_if->net_dev) && 256 if (batman_if->if_status != IF_ACTIVE)
256 (compare_orig(batman_if->net_dev->dev_addr, addr))) { 257 continue;
258
259 if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
257 rcu_read_unlock(); 260 rcu_read_unlock();
258 return 1; 261 return 1;
259 } 262 }
diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c
index 28bb627ffa13..de5a8c1a8104 100644
--- a/drivers/staging/batman-adv/originator.c
+++ b/drivers/staging/batman-adv/originator.c
@@ -391,11 +391,12 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
391int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) 391int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
392{ 392{
393 struct orig_node *orig_node; 393 struct orig_node *orig_node;
394 unsigned long flags;
394 HASHIT(hashit); 395 HASHIT(hashit);
395 396
396 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on 397 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
397 * if_num */ 398 * if_num */
398 spin_lock(&orig_hash_lock); 399 spin_lock_irqsave(&orig_hash_lock, flags);
399 400
400 while (hash_iterate(orig_hash, &hashit)) { 401 while (hash_iterate(orig_hash, &hashit)) {
401 orig_node = hashit.bucket->data; 402 orig_node = hashit.bucket->data;
@@ -404,11 +405,11 @@ int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
404 goto err; 405 goto err;
405 } 406 }
406 407
407 spin_unlock(&orig_hash_lock); 408 spin_unlock_irqrestore(&orig_hash_lock, flags);
408 return 0; 409 return 0;
409 410
410err: 411err:
411 spin_unlock(&orig_hash_lock); 412 spin_unlock_irqrestore(&orig_hash_lock, flags);
412 return -ENOMEM; 413 return -ENOMEM;
413} 414}
414 415
@@ -468,12 +469,13 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
468{ 469{
469 struct batman_if *batman_if_tmp; 470 struct batman_if *batman_if_tmp;
470 struct orig_node *orig_node; 471 struct orig_node *orig_node;
472 unsigned long flags;
471 HASHIT(hashit); 473 HASHIT(hashit);
472 int ret; 474 int ret;
473 475
474 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on 476 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
475 * if_num */ 477 * if_num */
476 spin_lock(&orig_hash_lock); 478 spin_lock_irqsave(&orig_hash_lock, flags);
477 479
478 while (hash_iterate(orig_hash, &hashit)) { 480 while (hash_iterate(orig_hash, &hashit)) {
479 orig_node = hashit.bucket->data; 481 orig_node = hashit.bucket->data;
@@ -500,10 +502,10 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
500 rcu_read_unlock(); 502 rcu_read_unlock();
501 503
502 batman_if->if_num = -1; 504 batman_if->if_num = -1;
503 spin_unlock(&orig_hash_lock); 505 spin_unlock_irqrestore(&orig_hash_lock, flags);
504 return 0; 506 return 0;
505 507
506err: 508err:
507 spin_unlock(&orig_hash_lock); 509 spin_unlock_irqrestore(&orig_hash_lock, flags);
508 return -ENOMEM; 510 return -ENOMEM;
509} 511}
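The originator.c hunks above convert plain spin_lock()/spin_unlock() on orig_hash_lock to the irqsave variants; the rest of batman-adv already takes this lock with interrupts disabled (see the spin_lock_irqsave() calls in routing.c below), and mixing the two invites a deadlock if an interrupt arrives while a non-irq-safe holder owns the lock. A minimal sketch of the safe pattern:

/* Sketch: a lock also taken from interrupt context must be acquired
 * with interrupts saved/disabled on every path. */
static DEFINE_SPINLOCK(my_hash_lock);

static int resize_all_entries(int max_if_num)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&my_hash_lock, flags);
	/* ... walk and resize the hash under the lock ... */
	spin_unlock_irqrestore(&my_hash_lock, flags);

	return ret;
}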
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index 066cc9149bf1..032195e6de94 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -783,6 +783,8 @@ int recv_bat_packet(struct sk_buff *skb,
783 783
784static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len) 784static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
785{ 785{
786 /* FIXME: each batman_if will be attached to a softif */
787 struct bat_priv *bat_priv = netdev_priv(soft_device);
786 struct orig_node *orig_node; 788 struct orig_node *orig_node;
787 struct icmp_packet_rr *icmp_packet; 789 struct icmp_packet_rr *icmp_packet;
788 struct ethhdr *ethhdr; 790 struct ethhdr *ethhdr;
@@ -801,6 +803,9 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
801 return NET_RX_DROP; 803 return NET_RX_DROP;
802 } 804 }
803 805
806 if (!bat_priv->primary_if)
807 return NET_RX_DROP;
808
804 /* answer echo request (ping) */ 809 /* answer echo request (ping) */
805 /* get routing information */ 810 /* get routing information */
806 spin_lock_irqsave(&orig_hash_lock, flags); 811 spin_lock_irqsave(&orig_hash_lock, flags);
@@ -830,7 +835,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
830 } 835 }
831 836
832 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 837 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
833 memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN); 838 memcpy(icmp_packet->orig,
839 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
834 icmp_packet->msg_type = ECHO_REPLY; 840 icmp_packet->msg_type = ECHO_REPLY;
835 icmp_packet->ttl = TTL; 841 icmp_packet->ttl = TTL;
836 842
@@ -845,6 +851,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
845 851
846static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len) 852static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
847{ 853{
854 /* FIXME: each batman_if will be attached to a softif */
855 struct bat_priv *bat_priv = netdev_priv(soft_device);
848 struct orig_node *orig_node; 856 struct orig_node *orig_node;
849 struct icmp_packet *icmp_packet; 857 struct icmp_packet *icmp_packet;
850 struct ethhdr *ethhdr; 858 struct ethhdr *ethhdr;
@@ -865,6 +873,9 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
865 return NET_RX_DROP; 873 return NET_RX_DROP;
866 } 874 }
867 875
876 if (!bat_priv->primary_if)
877 return NET_RX_DROP;
878
868 /* get routing information */ 879 /* get routing information */
869 spin_lock_irqsave(&orig_hash_lock, flags); 880 spin_lock_irqsave(&orig_hash_lock, flags);
870 orig_node = ((struct orig_node *) 881 orig_node = ((struct orig_node *)
@@ -892,7 +903,8 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
892 } 903 }
893 904
894 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 905 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
895 memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN); 906 memcpy(icmp_packet->orig,
907 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
896 icmp_packet->msg_type = TTL_EXCEEDED; 908 icmp_packet->msg_type = TTL_EXCEEDED;
897 icmp_packet->ttl = TTL; 909 icmp_packet->ttl = TTL;
898 910
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index 055edee7b4e4..da3c82e47bbd 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -29,7 +29,6 @@
29#include "vis.h" 29#include "vis.h"
30#include "aggregation.h" 30#include "aggregation.h"
31 31
32#include <linux/netfilter_bridge.h>
33 32
34static void send_outstanding_bcast_packet(struct work_struct *work); 33static void send_outstanding_bcast_packet(struct work_struct *work);
35 34
@@ -92,12 +91,9 @@ int send_skb_packet(struct sk_buff *skb,
92 91
93 /* dev_queue_xmit() returns a negative result on error. However on 92 /* dev_queue_xmit() returns a negative result on error. However on
94 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP 93 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
95 * (which is > 0). This will not be treated as an error. 94 * (which is > 0). This will not be treated as an error. */
96 * Also, if netfilter/ebtables wants to block outgoing batman
97 * packets then giving them a chance to do so here */
98 95
99 return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 96 return dev_queue_xmit(skb);
100 dev_queue_xmit);
101send_skb_err: 97send_skb_err:
102 kfree_skb(skb); 98 kfree_skb(skb);
103 return NET_XMIT_DROP; 99 return NET_XMIT_DROP;
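The send.c hunk above drops the NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, ...) detour (the matching receive-side hook was removed in hard-interface.c) and hands the frame straight to dev_queue_xmit(); as the retained comment notes, a positive return such as NET_XMIT_DROP signals congestion or traffic shaping rather than an error, so only negative values should be treated as failure. A minimal sketch of sending a prebuilt frame on a specific device:

/* Sketch: queue an already-built frame and treat only negative
 * dev_queue_xmit() results as hard errors. */
static int send_frame(struct sk_buff *skb, struct net_device *dev)
{
	int ret;

	skb->dev = dev;			/* transmit on this interface */
	ret = dev_queue_xmit(skb);	/* consumes the skb in all cases */
	if (ret < 0)
		return ret;		/* real error */

	return 0;			/* sent, or dropped by the qdisc */
}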
diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h
index 21d0717afb09..9aa9d369c752 100644
--- a/drivers/staging/batman-adv/types.h
+++ b/drivers/staging/batman-adv/types.h
@@ -126,6 +126,7 @@ struct socket_client {
126 unsigned char index; 126 unsigned char index;
127 spinlock_t lock; 127 spinlock_t lock;
128 wait_queue_head_t queue_wait; 128 wait_queue_head_t queue_wait;
129 struct bat_priv *bat_priv;
129}; 130};
130 131
131struct socket_packet { 132struct socket_packet {
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index c6aa52f8dcee..48d9fb1227df 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -222,7 +222,6 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
222 p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; 222 p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
223 p_dev->resource[0]->flags |= 223 p_dev->resource[0]->flags |=
224 pcmcia_io_cfg_data_width(io->flags); 224 pcmcia_io_cfg_data_width(io->flags);
225 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
226 p_dev->resource[0]->start = io->win[0].base; 225 p_dev->resource[0]->start = io->win[0].base;
227 p_dev->resource[0]->end = io->win[0].len; 226 p_dev->resource[0]->end = io->win[0].len;
228 if (io->nwin > 1) { 227 if (io->nwin > 1) {
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 56e11575c977..64a01147ecae 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -327,6 +327,9 @@ static const struct net_device_ops device_ops = {
327 .ndo_stop = netvsc_close, 327 .ndo_stop = netvsc_close,
328 .ndo_start_xmit = netvsc_start_xmit, 328 .ndo_start_xmit = netvsc_start_xmit,
329 .ndo_set_multicast_list = netvsc_set_multicast_list, 329 .ndo_set_multicast_list = netvsc_set_multicast_list,
330 .ndo_change_mtu = eth_change_mtu,
331 .ndo_validate_addr = eth_validate_addr,
332 .ndo_set_mac_address = eth_mac_addr,
330}; 333};
331 334
332static int netvsc_probe(struct device *device) 335static int netvsc_probe(struct device *device)
diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/staging/hv/ring_buffer.c
index 17bc7626f70a..d78c569ac94a 100644
--- a/drivers/staging/hv/ring_buffer.c
+++ b/drivers/staging/hv/ring_buffer.c
@@ -193,8 +193,7 @@ Description:
193static inline u64 193static inline u64
194GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo) 194GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo)
195{ 195{
196 return ((u64)RingInfo->RingBuffer->WriteIndex << 32) 196 return (u64)RingInfo->RingBuffer->WriteIndex << 32;
197 || RingInfo->RingBuffer->ReadIndex;
198} 197}
199 198
200 199
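The ring_buffer.c hunk above removes an expression that used the logical "||" where a bitwise "|" was intended: "((u64)WriteIndex << 32) || ReadIndex" always evaluates to 0 or 1, so the read index was never packed into the low 32 bits at all. The fix keeps only the write index; packing both correctly would look like this short sketch:

/* Sketch: pack two 32-bit indices into one u64 with shift + bitwise OR. */
static inline u64 pack_indices(u32 write_index, u32 read_index)
{
	return ((u64)write_index << 32) | read_index;
}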
diff --git a/drivers/staging/hv/storvsc_api.h b/drivers/staging/hv/storvsc_api.h
index 0063bde9a4b2..8505a1c5f9ee 100644
--- a/drivers/staging/hv/storvsc_api.h
+++ b/drivers/staging/hv/storvsc_api.h
@@ -28,10 +28,10 @@
28#include "vmbus_api.h" 28#include "vmbus_api.h"
29 29
30/* Defines */ 30/* Defines */
31#define STORVSC_RING_BUFFER_SIZE (10*PAGE_SIZE) 31#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
32#define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE) 32#define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
33 33
34#define STORVSC_MAX_IO_REQUESTS 64 34#define STORVSC_MAX_IO_REQUESTS 128
35 35
36/* 36/*
37 * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In 37 * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index 075b61bd492f..62882a437aa4 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -495,7 +495,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
495 495
496 /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ 496 /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
497 497
498 if (j == 0) 498 if (bounce_addr == 0)
499 bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); 499 bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
500 500
501 while (srclen) { 501 while (srclen) {
@@ -556,7 +556,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
556 destlen = orig_sgl[i].length; 556 destlen = orig_sgl[i].length;
557 /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ 557 /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
558 558
559 if (j == 0) 559 if (bounce_addr == 0)
560 bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); 560 bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
561 561
562 while (destlen) { 562 while (destlen) {
@@ -615,6 +615,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
615 unsigned int request_size = 0; 615 unsigned int request_size = 0;
616 int i; 616 int i;
617 struct scatterlist *sgl; 617 struct scatterlist *sgl;
618 unsigned int sg_count = 0;
618 619
619 DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d " 620 DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d "
620 "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction, 621 "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction,
@@ -697,6 +698,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
697 request->DataBuffer.Length = scsi_bufflen(scmnd); 698 request->DataBuffer.Length = scsi_bufflen(scmnd);
698 if (scsi_sg_count(scmnd)) { 699 if (scsi_sg_count(scmnd)) {
699 sgl = (struct scatterlist *)scsi_sglist(scmnd); 700 sgl = (struct scatterlist *)scsi_sglist(scmnd);
701 sg_count = scsi_sg_count(scmnd);
700 702
701 /* check if we need to bounce the sgl */ 703 /* check if we need to bounce the sgl */
702 if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) { 704 if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
@@ -731,15 +733,16 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
731 scsi_sg_count(scmnd)); 733 scsi_sg_count(scmnd));
732 734
733 sgl = cmd_request->bounce_sgl; 735 sgl = cmd_request->bounce_sgl;
736 sg_count = cmd_request->bounce_sgl_count;
734 } 737 }
735 738
736 request->DataBuffer.Offset = sgl[0].offset; 739 request->DataBuffer.Offset = sgl[0].offset;
737 740
738 for (i = 0; i < scsi_sg_count(scmnd); i++) { 741 for (i = 0; i < sg_count; i++) {
739 DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n", 742 DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n",
740 i, sgl[i].length, sgl[i].offset); 743 i, sgl[i].length, sgl[i].offset);
741 request->DataBuffer.PfnArray[i] = 744 request->DataBuffer.PfnArray[i] =
742 page_to_pfn(sg_page((&sgl[i]))); 745 page_to_pfn(sg_page((&sgl[i])));
743 } 746 }
744 } else if (scsi_sglist(scmnd)) { 747 } else if (scsi_sglist(scmnd)) {
745 /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */ 748 /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */
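The storvsc_drv.c hunks above fix two related bounce-buffer bugs: the page-map test in the copy loops now checks "bounce_addr == 0" (one mapped bounce page can span several source entries, so "j == 0" only covered the very first page), and the PFN loop now iterates over a local sg_count because the bounce scatterlist substituted for the original can have a different number of entries. A sketch of the second fix, as a fragment reusing the names from the hunk:

/* Sketch (fragment): once the bounce list replaces the caller's
 * scatterlist, its own entry count must drive the PFN loop. */
struct scatterlist *sgl = scsi_sglist(scmnd);
unsigned int sg_count = scsi_sg_count(scmnd);
int i;

if (do_bounce_buffer(sgl, sg_count) != -1) {
	sgl = cmd_request->bounce_sgl;		/* substituted list */
	sg_count = cmd_request->bounce_sgl_count;
}

for (i = 0; i < sg_count; i++)
	request->DataBuffer.PfnArray[i] = page_to_pfn(sg_page(&sgl[i]));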
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 638ad6b35891..9493128e5fd2 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,6 +1,6 @@
1config OCTEON_ETHERNET 1config OCTEON_ETHERNET
2 tristate "Cavium Networks Octeon Ethernet support" 2 tristate "Cavium Networks Octeon Ethernet support"
3 depends on CPU_CAVIUM_OCTEON 3 depends on CPU_CAVIUM_OCTEON && NETDEVICES
4 select PHYLIB 4 select PHYLIB
5 select MDIO_OCTEON 5 select MDIO_OCTEON
6 help 6 help
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index a0fe31de0a6d..ebf9074a9083 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -44,6 +44,7 @@ struct usb_device_id rtusb_usb_id[] = {
44 {USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */ 44 {USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */
45 {USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */ 45 {USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */
46 {USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */ 46 {USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */
47 {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom 2770 */
47 {USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */ 48 {USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */
48 {USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */ 49 {USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */
49 {USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */ 50 {USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */
@@ -95,7 +96,8 @@ struct usb_device_id rtusb_usb_id[] = {
95 {USB_DEVICE(0x050d, 0x815c)}, 96 {USB_DEVICE(0x050d, 0x815c)},
96 {USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */ 97 {USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */
97 {USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */ 98 {USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */
98 {USB_DEVICE(0x04E8, 0x2018)}, /* samsung */ 99 {USB_DEVICE(0x04E8, 0x2018)}, /* samsung linkstick2 */
100 {USB_DEVICE(0x1690, 0x0740)}, /* Askey */
99 {USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */ 101 {USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */
100 {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */ 102 {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */
101 {USB_DEVICE(0x7392, 0x7718)}, 103 {USB_DEVICE(0x7392, 0x7718)},
@@ -105,21 +107,34 @@ struct usb_device_id rtusb_usb_id[] = {
105 {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */ 107 {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
106 {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */ 108 {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
107 {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */ 109 {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */
110 {USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */
108#endif /* RT2870 // */ 111#endif /* RT2870 // */
109#ifdef RT3070 112#ifdef RT3070
110 {USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */ 113 {USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */
111 {USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */ 114 {USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */
112 {USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */ 115 {USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */
113 {USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */ 116 {USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */
117 {USB_DEVICE(0x0DB0, 0x871C)}, /* Ralink 3070 */
118 {USB_DEVICE(0x0DB0, 0x822C)}, /* Ralink 3070 */
119 {USB_DEVICE(0x0DB0, 0x871B)}, /* Ralink 3070 */
120 {USB_DEVICE(0x0DB0, 0x822B)}, /* Ralink 3070 */
114 {USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */ 121 {USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */
115 {USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */ 122 {USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */
123 {USB_DEVICE(0x0DF6, 0x0048)}, /* Sitecom 3070 */
124 {USB_DEVICE(0x0DF6, 0x0047)}, /* Sitecom 3071 */
116 {USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */ 125 {USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */
117 {USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */ 126 {USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */
118 {USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */ 127 {USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */
128 {USB_DEVICE(0x083A, 0xA701)}, /* SMC 3070 */
129 {USB_DEVICE(0x083A, 0xA702)}, /* SMC 3072 */
119 {USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */ 130 {USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */
120 {USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */ 131 {USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */
121 {USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */ 132 {USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */
133 {USB_DEVICE(0x1740, 0x9707)}, /* EnGenius 3070 */
134 {USB_DEVICE(0x1740, 0x9708)}, /* EnGenius 3071 */
135 {USB_DEVICE(0x1740, 0x9709)}, /* EnGenius 3072 */
122 {USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */ 136 {USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */
137 {USB_DEVICE(0x13D3, 0x3305)}, /* AzureWave 3070*/
123 {USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */ 138 {USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */
124 {USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */ 139 {USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */
125 {USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */ 140 {USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */
@@ -132,14 +147,36 @@ struct usb_device_id rtusb_usb_id[] = {
132 {USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */ 147 {USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */
133 {USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */ 148 {USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */
134 {USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */ 149 {USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */
150 {USB_DEVICE(0x07D1, 0x3C16)}, /* D-Link 3070 */
151 {USB_DEVICE(0x07D1, 0x3C17)}, /* D-Link 8070 */
135 {USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */ 152 {USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */
136 {USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */ 153 {USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */
137 {USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */ 154 {USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */
138 {USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */ 155 {USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */
139 {USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */ 156 {USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */
157 {USB_DEVICE(0x04BB, 0x0947)}, /* I-O DATA 3070 */
158 {USB_DEVICE(0x04BB, 0x0948)}, /* I-O DATA 3072 */
140 {USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */ 159 {USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */
160 {USB_DEVICE(0x20B8, 0x8888)}, /* PARA INDUSTRIAL 3070 */
161 {USB_DEVICE(0x0B05, 0x1784)}, /* Asus 3072 */
162 {USB_DEVICE(0x203D, 0x14A9)}, /* Encore 3070*/
163 {USB_DEVICE(0x0DB0, 0x899A)}, /* MSI 3070*/
164 {USB_DEVICE(0x0DB0, 0x3870)}, /* MSI 3070*/
165 {USB_DEVICE(0x0DB0, 0x870A)}, /* MSI 3070*/
166 {USB_DEVICE(0x0DB0, 0x6899)}, /* MSI 3070 */
167 {USB_DEVICE(0x0DB0, 0x3822)}, /* MSI 3070 */
168 {USB_DEVICE(0x0DB0, 0x3871)}, /* MSI 3070 */
169 {USB_DEVICE(0x0DB0, 0x871A)}, /* MSI 3070 */
170 {USB_DEVICE(0x0DB0, 0x822A)}, /* MSI 3070 */
171 {USB_DEVICE(0x0DB0, 0x3821)}, /* Ralink 3070 */
172 {USB_DEVICE(0x0DB0, 0x821A)}, /* Ralink 3070 */
173 {USB_DEVICE(0x083A, 0xA703)}, /* IO-MAGIC */
174 {USB_DEVICE(0x13D3, 0x3307)}, /* Azurewave */
175 {USB_DEVICE(0x13D3, 0x3321)}, /* Azurewave */
176 {USB_DEVICE(0x07FA, 0x7712)}, /* Edimax */
177 {USB_DEVICE(0x0789, 0x0166)}, /* Edimax */
178 {USB_DEVICE(0x148F, 0x2070)}, /* Edimax */
141#endif /* RT3070 // */ 179#endif /* RT3070 // */
142 {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom WL-608 */
143 {USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */ 180 {USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */
144 {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */ 181 {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */
145 {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */ 182 {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
deleted file mode 100644
index 0a9c39c7f2bd..000000000000
--- a/drivers/staging/sep/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
1config DX_SEP
2 tristate "Discretix SEP driver"
3# depends on MRST
4 depends on RAR_REGISTER && PCI
5 default y
6 help
7 Discretix SEP driver
8
9 If unsure say M. The compiled module will be
10 called sep_driver.ko
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
deleted file mode 100644
index 628d5f919414..000000000000
--- a/drivers/staging/sep/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
1obj-$(CONFIG_DX_SEP) := sep_driver.o
2
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
deleted file mode 100644
index ff0e931dab64..000000000000
--- a/drivers/staging/sep/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
1Todo's so far (from Alan Cox)
2- Fix firmware loading
3- Get firmware into firmware git tree
4- Review and tidy each algorithm function
5- Check whether it can be plugged into any of the kernel crypto API
6 interfaces
7- Do something about the magic shared memory interface and replace it
8 with something saner (in Linux terms)
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
deleted file mode 100644
index 9200524bb64d..000000000000
--- a/drivers/staging/sep/sep_dev.h
+++ /dev/null
@@ -1,110 +0,0 @@
1#ifndef __SEP_DEV_H__
2#define __SEP_DEV_H__
3
4/*
5 *
6 * sep_dev.h - Security Processor Device Structures
7 *
8 * Copyright(c) 2009 Intel Corporation. All rights reserved.
9 * Copyright(c) 2009 Discretix. All rights reserved.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * more details.
20 *
21 * You should have received a copy of the GNU General Public License along with
22 * this program; if not, write to the Free Software Foundation, Inc., 59
23 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 *
25 * CONTACTS:
26 *
27 * Alan Cox alan@linux.intel.com
28 *
29 */
30
31struct sep_device {
32 /* pointer to pci dev */
33 struct pci_dev *pdev;
34
35 unsigned long in_use;
36
37 /* address of the shared memory allocated during init for SEP driver
38 (coherent alloc) */
39 void *shared_addr;
40 /* the physical address of the shared area */
41 dma_addr_t shared_bus;
42
43 /* restricted access region (coherent alloc) */
44 dma_addr_t rar_bus;
45 void *rar_addr;
46 /* firmware regions: cache is at rar_addr */
47 unsigned long cache_size;
48
49 /* follows the cache */
50 dma_addr_t resident_bus;
51 unsigned long resident_size;
52 void *resident_addr;
53
54 /* start address of the access to the SEP registers from driver */
55 void __iomem *reg_addr;
56 /* transaction counter that coordinates the transactions between SEP and HOST */
57 unsigned long send_ct;
58 /* counter for the messages from sep */
59 unsigned long reply_ct;
60 /* counter for the number of bytes allocated in the pool for the current
61 transaction */
62 unsigned long data_pool_bytes_allocated;
63
64 /* array of pointers to the pages that represent input data for the synchronic
65 DMA action */
66 struct page **in_page_array;
67
68 /* array of pointers to the pages that represent out data for the synchronic
69 DMA action */
70 struct page **out_page_array;
71
72 /* number of pages in the sep_in_page_array */
73 unsigned long in_num_pages;
74
75 /* number of pages in the sep_out_page_array */
76 unsigned long out_num_pages;
77
78 /* global data for every flow */
79 struct sep_flow_context_t flows[SEP_DRIVER_NUM_FLOWS];
80
81 /* pointer to the workqueue that handles the flow done interrupts */
82 struct workqueue_struct *flow_wq;
83
84};
85
86static struct sep_device *sep_dev;
87
88static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
89{
90 void __iomem *addr = dev->reg_addr + reg;
91 writel(value, addr);
92}
93
94static inline u32 sep_read_reg(struct sep_device *dev, int reg)
95{
96 void __iomem *addr = dev->reg_addr + reg;
97 return readl(addr);
98}
99
100/* wait for SRAM write complete(indirect write */
101static inline void sep_wait_sram_write(struct sep_device *dev)
102{
103 u32 reg_val;
104 do
105 reg_val = sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR);
106 while (!(reg_val & 1));
107}
108
109
110#endif
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644
index ecbde3467b1b..000000000000
--- a/drivers/staging/sep/sep_driver.c
+++ /dev/null
@@ -1,2742 +0,0 @@
1/*
2 *
3 * sep_driver.c - Security Processor Driver main group of functions
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32#include <linux/init.h>
33#include <linux/module.h>
34#include <linux/fs.h>
35#include <linux/cdev.h>
36#include <linux/kdev_t.h>
37#include <linux/mutex.h>
38#include <linux/sched.h>
39#include <linux/mm.h>
40#include <linux/poll.h>
41#include <linux/wait.h>
42#include <linux/pci.h>
43#include <linux/firmware.h>
44#include <linux/slab.h>
45#include <asm/ioctl.h>
46#include <linux/ioport.h>
47#include <asm/io.h>
48#include <linux/interrupt.h>
49#include <linux/pagemap.h>
50#include <asm/cacheflush.h>
51#include "sep_driver_hw_defs.h"
52#include "sep_driver_config.h"
53#include "sep_driver_api.h"
54#include "sep_dev.h"
55
56#if SEP_DRIVER_ARM_DEBUG_MODE
57
58#define CRYS_SEP_ROM_length 0x4000
59#define CRYS_SEP_ROM_start_address 0x8000C000UL
60#define CRYS_SEP_ROM_start_address_offset 0xC000UL
61#define SEP_ROM_BANK_register 0x80008420UL
62#define SEP_ROM_BANK_register_offset 0x8420UL
63#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
64
65/*
66 * THESE 2 definitions are specific to the board - must be
67 * defined during integration
68 */
69#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
70
71/* 2M size */
72
73static void sep_load_rom_code(struct sep_device *sep)
74{
75 /* Index variables */
76 unsigned long i, k, j;
77 u32 reg;
78 u32 error;
79 u32 warning;
80
81 /* Loading ROM from SEP_ROM_image.h file */
82 k = sizeof(CRYS_SEP_ROM);
83
84 edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
85
86 edbg("SEP Driver: k is %lu\n", k);
87 edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
88 edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
89
90 for (i = 0; i < 4; i++) {
91 /* write bank */
92 sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
93
94 for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
95 sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
96
97 k = k - 4;
98
99 if (k == 0) {
100 j = CRYS_SEP_ROM_length;
101 i = 4;
102 }
103 }
104 }
105
106 /* reset the SEP */
107 sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
108
109 /* poll for SEP ROM boot finish */
110 do
111 reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
112 while (!reg);
113
114 edbg("SEP Driver: ROM polling ended\n");
115
116 switch (reg) {
117 case 0x1:
118 /* fatal error - read erro status from GPRO */
119 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
120 edbg("SEP Driver: ROM polling case 1\n");
121 break;
122 case 0x4:
123 /* Cold boot ended successfully */
124 case 0x8:
125 /* Warmboot ended successfully */
126 case 0x10:
127 /* ColdWarm boot ended successfully */
128 error = 0;
129 case 0x2:
130 /* Boot First Phase ended */
131 warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
132 case 0x20:
133 edbg("SEP Driver: ROM polling case %d\n", reg);
134 break;
135 }
136
137}
138
139#else
140static void sep_load_rom_code(struct sep_device *sep) { }
141#endif /* SEP_DRIVER_ARM_DEBUG_MODE */
142
143
144
145/*----------------------------------------
146 DEFINES
147-----------------------------------------*/
148
149#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
150#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
151
152/*--------------------------------------------
153 GLOBAL variables
154--------------------------------------------*/
155
156/* debug messages level */
157static int debug;
158module_param(debug, int , 0);
159MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
160
161/* Keep this a single static object for now to keep the conversion easy */
162
163static struct sep_device sep_instance;
164static struct sep_device *sep_dev = &sep_instance;
165
166/*
167 mutex for the access to the internals of the sep driver
168*/
169static DEFINE_MUTEX(sep_mutex);
170
171
172/* wait queue head (event) of the driver */
173static DECLARE_WAIT_QUEUE_HEAD(sep_event);
174
175/**
176 * sep_load_firmware - copy firmware cache/resident
177 * @sep: device we are loading
178 *
179 * This functions copies the cache and resident from their source
180 * location into destination shared memory.
181 */
182
183static int sep_load_firmware(struct sep_device *sep)
184{
185 const struct firmware *fw;
186 char *cache_name = "sep/cache.image.bin";
187 char *res_name = "sep/resident.image.bin";
188 int error;
189
190 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
191 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
192
193 /* load cache */
194 error = request_firmware(&fw, cache_name, &sep->pdev->dev);
195 if (error) {
196 edbg("SEP Driver:cant request cache fw\n");
197 return error;
198 }
199 edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
200
201 memcpy(sep->rar_addr, (void *)fw->data, fw->size);
202 sep->cache_size = fw->size;
203 release_firmware(fw);
204
205 sep->resident_bus = sep->rar_bus + sep->cache_size;
206 sep->resident_addr = sep->rar_addr + sep->cache_size;
207
208 /* load resident */
209 error = request_firmware(&fw, res_name, &sep->pdev->dev);
210 if (error) {
211 edbg("SEP Driver:cant request res fw\n");
212 return error;
213 }
214 edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
215
216 memcpy(sep->resident_addr, (void *) fw->data, fw->size);
217 sep->resident_size = fw->size;
218 release_firmware(fw);
219
220 edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
221 sep->resident_addr, (unsigned long long)sep->resident_bus,
222 sep->rar_addr, (unsigned long long)sep->rar_bus);
223 return 0;
224}
225
226MODULE_FIRMWARE("sep/cache.image.bin");
227MODULE_FIRMWARE("sep/resident.image.bin");
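/*
 * Resulting RAR layout after sep_load_firmware() above (reference only):
 *
 *	rar_addr  / rar_bus		cache image	(cache_size bytes)
 *	rar_addr + cache_size		resident image	(resident_size bytes)
 *
 * i.e. the resident image is placed immediately after the cache image
 * in the same RAR region, both in the virtual and in the bus mapping.
 */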
228
229/**
230 * sep_map_and_alloc_shared_area - allocate shared block
231 * @sep: security processor
232 * @size: size of shared area
233 *
234 * Allocate a shared buffer in host memory that can be used by both the
235 * kernel and also the hardware interface via DMA.
236 */
237
238static int sep_map_and_alloc_shared_area(struct sep_device *sep,
239 unsigned long size)
240{
241 /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
242 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
243 &sep->shared_bus, GFP_KERNEL);
244
245 if (!sep->shared_addr) {
246 edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
247 return -ENOMEM;
248 }
249 /* set the bus address of the shared area */
250 edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
251 size, sep->shared_addr, (unsigned long long)sep->shared_bus);
252 return 0;
253}
254
255/**
256 * sep_unmap_and_free_shared_area - free shared block
257 * @sep: security processor
258 *
259 * Free the shared area allocated to the security processor. The
260 * processor must have finished with this and any final posted
261 * writes cleared before we do so.
262 */
263static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
264{
265 dma_free_coherent(&sep->pdev->dev, size,
266 sep->shared_addr, sep->shared_bus);
267}
268
269/**
270 * sep_shared_virt_to_bus - convert bus/virt addresses
271 *
272 * Returns the bus address inside the shared area according
273 * to the virtual address.
274 */
275
276static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
277 void *virt_address)
278{
279 dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
280 edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa,
281 virt_address);
282 return pa;
283}
284
285/**
286 * sep_shared_bus_to_virt - convert bus/virt addresses
287 *
288 * Returns virtual address inside the shared area according
289 * to the bus address.
290 */
291
292static void *sep_shared_bus_to_virt(struct sep_device *sep,
293 dma_addr_t bus_address)
294{
295 return sep->shared_addr + (bus_address - sep->shared_bus);
296}
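/*
 * Illustrative sketch, not part of the original driver: the two helpers
 * above are inverses over the shared area, so a virt -> bus -> virt round
 * trip returns the original pointer.  A hypothetical self-check (assuming
 * "offset" stays inside the allocated shared area) could look like this:
 */
#if 0	/* example only */
static int sep_shared_addr_selfcheck(struct sep_device *sep,
				     unsigned long offset)
{
	void *virt = sep->shared_addr + offset;
	dma_addr_t bus = sep_shared_virt_to_bus(sep, virt);

	/* both conversions must agree for any offset inside the area */
	return sep_shared_bus_to_virt(sep, bus) == virt;
}
#endif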
297
298
299/**
300 * sep_try_open - attempt to open a SEP device
301 * @sep: device to attempt to open
302 *
303 * Atomically attempt to get ownership of a SEP device.
304 * Returns 1 if the device was opened, 0 on failure.
305 */
306
307static int sep_try_open(struct sep_device *sep)
308{
309 if (!test_and_set_bit(0, &sep->in_use))
310 return 1;
311 return 0;
312}
313
314/**
315 * sep_open - device open method
316 * @inode: inode of sep device
317 * @filp: file handle to sep device
318 *
319 * Open method for the SEP device. Called when userspace opens
320 * the SEP device node. Must also release the memory data pool
321 * allocations.
322 *
323 * Returns zero on success otherwise an error code.
324 */
325
326static int sep_open(struct inode *inode, struct file *filp)
327{
328 if (sep_dev == NULL)
329 return -ENODEV;
330
331 /* check the blocking mode */
332 if (filp->f_flags & O_NDELAY) {
333 if (sep_try_open(sep_dev) == 0)
334 return -EAGAIN;
335 } else
336 if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
337 return -EINTR;
338
339 /* Bind to the device, we only have one which makes it easy */
340 filp->private_data = sep_dev;
341 /* release data pool allocations */
342 sep_dev->data_pool_bytes_allocated = 0;
343 return 0;
344}
345
346
347/**
348 * sep_release - close a SEP device
349 * @inode: inode of SEP device
350 * @filp: file handle being closed
351 *
352 * Called on the final close of a SEP device. As the open protects against
353 * multiple simultaneous opens, this method is called when the
354 * final reference to the open handle is dropped.
355 */
356
357static int sep_release(struct inode *inode, struct file *filp)
358{
359 struct sep_device *sep = filp->private_data;
360#if 0 /*!SEP_DRIVER_POLLING_MODE */
361 /* close IMR */
362 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
363 /* release IRQ line */
364 free_irq(SEP_DIRVER_IRQ_NUM, sep);
365
366#endif
367 /* Ensure any blocked open progresses */
368 clear_bit(0, &sep->in_use);
369 wake_up(&sep_event);
370 return 0;
371}
372
373/*---------------------------------------------------------------
374 map function - this functions maps the message shared area
375-----------------------------------------------------------------*/
376static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
377{
378 dma_addr_t bus_addr;
379 struct sep_device *sep = filp->private_data;
380
381 dbg("-------->SEP Driver: mmap start\n");
382
383 /* check that the size of the mapped range does not exceed the size of
384 the message shared area */
385 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
386 edbg("SEP Driver mmap requested size is more than allowed\n");
387 printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
388 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
389 printk(KERN_WARNING "SEP Driver vma->vm_start is %08lx\n", vma->vm_start);
390 return -EAGAIN;
391 }
392
393 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
394
395 /* get bus address */
396 bus_addr = sep->shared_bus;
397
398 edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
399
400 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
401 edbg("SEP Driver remap_pfn_range failed\n");
402 printk(KERN_WARNING "SEP Driver remap_pfn_range failed\n");
403 return -EAGAIN;
404 }
405
406 dbg("SEP Driver:<-------- mmap end\n");
407
408 return 0;
409}
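/*
 * Illustrative user-space sketch, not part of the driver: mapping the
 * message shared area through the mmap method above.  The "/dev/sep"
 * node name is an assumption for the example; the requested size must
 * not exceed SEP_DRIVER_MMMAP_AREA_SIZE or the mmap call fails.
 */
#if 0	/* example only - build as a normal user-space program */
#include <stddef.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *sep_map_message_area(size_t size)
{
	void *area;
	int fd = open("/dev/sep", O_RDWR);	/* hypothetical node name */

	if (fd < 0)
		return NULL;
	area = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);				/* mapping stays valid */
	return area == MAP_FAILED ? NULL : area;
}
#endif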
410
411
412/*-----------------------------------------------
413 poll function
414*----------------------------------------------*/
415static unsigned int sep_poll(struct file *filp, poll_table * wait)
416{
417 unsigned long count;
418 unsigned int mask = 0;
419 unsigned long retval = 0; /* flow id */
420 struct sep_device *sep = filp->private_data;
421
422 dbg("---------->SEP Driver poll: start\n");
423
424
425#if SEP_DRIVER_POLLING_MODE
426
427 while (sep->send_ct != (retval & 0x7FFFFFFF)) {
428 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
429
430 for (count = 0; count < 10 * 4; count += 4)
431 edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
432 }
433
434 sep->reply_ct++;
435#else
436 /* add the event to the polling wait table */
437 poll_wait(filp, &sep_event, wait);
438
439#endif
440
441 edbg("sep->send_ct is %lu\n", sep->send_ct);
442 edbg("sep->reply_ct is %lu\n", sep->reply_ct);
443
444 /* check if the data is ready */
445 if (sep->send_ct == sep->reply_ct) {
446 for (count = 0; count < 12 * 4; count += 4)
447 edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
448
449 for (count = 0; count < 10 * 4; count += 4)
450 edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
451
452 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
453 edbg("retval is %lu\n", retval);
454 /* check if this is a sep reply or a request */
455 if (retval >> 31) {
456 edbg("SEP Driver: sep request in\n");
457 /* request */
458 mask |= POLLOUT | POLLWRNORM;
459 } else {
460 edbg("SEP Driver: sep reply in\n");
461 mask |= POLLIN | POLLRDNORM;
462 }
463 }
464 dbg("SEP Driver:<-------- poll exit\n");
465 return mask;
466}
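/*
 * Illustrative user-space sketch, not part of the driver: waiting for a
 * SEP reply through the poll method above.  POLLIN/POLLRDNORM signal a
 * reply from the SEP, POLLOUT/POLLWRNORM a request, matching the mask
 * set in sep_poll().  The file descriptor is assumed to be an open
 * handle on the (hypothetical) /dev/sep node.
 */
#if 0	/* example only - build as a normal user-space program */
#include <poll.h>

static int sep_wait_for_reply(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, timeout_ms) <= 0)
		return -1;			/* error or timeout */
	return (pfd.revents & POLLIN) ? 0 : -1;
}
#endif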
467
468/**
469 * sep_time_address - address in SEP memory of time
470 * @sep: SEP device we want the address from
471 *
472 * Return the address of the two dwords in memory used for time
473 * setting.
474 */
475
476static u32 *sep_time_address(struct sep_device *sep)
477{
478 return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
479}
480
481/**
482 * sep_set_time - set the SEP time
483 * @sep: the SEP we are setting the time for
484 *
485 * Calculates time and sets it at the predefined address.
486 * Called with the sep mutex held.
487 */
488static unsigned long sep_set_time(struct sep_device *sep)
489{
490 struct timeval time;
491 u32 *time_addr; /* address of time as seen by the kernel */
492
493
494 dbg("sep:sep_set_time start\n");
495
496 do_gettimeofday(&time);
497
498 /* set value in the SYSTEM MEMORY offset */
499 time_addr = sep_time_address(sep);
500
501 time_addr[0] = SEP_TIME_VAL_TOKEN;
502 time_addr[1] = time.tv_sec;
503
504 edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
505 edbg("SEP Driver:time_addr is %p\n", time_addr);
506 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
507
508 return time.tv_sec;
509}
510
511/**
512 * sep_dump_message - dump the message that is pending
513 * @sep: sep device
514 *
515 * Dump out the message pending in the shared message area
516 */
517
518static void sep_dump_message(struct sep_device *sep)
519{
520 int count;
521 for (count = 0; count < 12 * 4; count += 4)
522 edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
523}
524
525/**
526 * sep_send_command_handler - kick off a command
527 * @sep: sep being signalled
528 *
529 * This function raises an interrupt to the SEP, signalling that a new
530 * command from the host is pending
531 */
532
533static void sep_send_command_handler(struct sep_device *sep)
534{
535 dbg("sep:sep_send_command_handler start\n");
536
537 mutex_lock(&sep_mutex);
538 sep_set_time(sep);
539
540 /* FIXME: flush cache */
541 flush_cache_all();
542
543 sep_dump_message(sep);
544 /* update counter */
545 sep->send_ct++;
546 /* send interrupt to SEP */
547 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
548 dbg("SEP Driver:<-------- sep_send_command_handler end\n");
549 mutex_unlock(&sep_mutex);
550 return;
551}
552
553/**
554 * sep_send_reply_command_handler - kick off a command reply
555 * @sep: sep being signalled
556 *
557 * This function raises an interrupt to the SEP, signalling that a new
558 * reply from the host is pending
559 */
560
561static void sep_send_reply_command_handler(struct sep_device *sep)
562{
563 dbg("sep:sep_send_reply_command_handler start\n");
564
565 /* flush cache */
566 flush_cache_all();
567
568 sep_dump_message(sep);
569
570 mutex_lock(&sep_mutex);
571 sep->send_ct++; /* update counter */
572 /* send the interrupt to SEP */
573 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
574 /* update both counters */
575 sep->send_ct++;
576 sep->reply_ct++;
577 mutex_unlock(&sep_mutex);
578 dbg("sep: sep_send_reply_command_handler end\n");
579}
580
581/*
582 This function handles the allocate data pool memory request
583 This function calculates the bus address of the
584 allocated memory, and the offset of this area from the mapped address.
585 Therefore, the FVOs in user space can calculate the exact virtual
586 address of this allocated memory
587*/
588static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
589 unsigned long arg)
590{
591 int error;
592 struct sep_driver_alloc_t command_args;
593
594 dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
595
596 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
597 if (error) {
598 error = -EFAULT;
599 goto end_function;
600 }
601
602 /* allocate memory */
603 if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
604 error = -ENOMEM;
605 goto end_function;
606 }
607
608 /* set the virtual and bus address */
609 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
610 command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
611
612 /* write the memory back to the user space */
613 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
614 if (error) {
615 error = -EFAULT;
616 goto end_function;
617 }
618
619 /* set the allocation */
620 sep->data_pool_bytes_allocated += command_args.num_bytes;
621
622end_function:
623 dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
624 return error;
625}
626
627/*
628 This function handles write into allocated data pool command
629*/
630static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
631{
632 int error;
633 void *virt_address;
634 unsigned long va;
635 unsigned long app_in_address;
636 unsigned long num_bytes;
637 void *data_pool_area_addr;
638
639 dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
640
641 /* get the application address */
642 error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
643 if (error)
644 goto end_function;
645
646 /* get the virtual kernel address */
647 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
648 if (error)
649 goto end_function;
650 virt_address = (void *)va;
651
652 /* get the number of bytes */
653 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
654 if (error)
655 goto end_function;
656
657 /* calculate the start of the data pool */
658 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
659
660
661 /* check that the range of the virtual kernel address is correct */
662 if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
663 error = -EINVAL;
664 goto end_function;
665 }
666 /* copy the application data */
667 error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
668 if (error)
669 error = -EFAULT;
670end_function:
671 dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
672 return error;
673}
674
675/*
676 this function handles the read from data pool command
677*/
678static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
679{
680 int error;
681 /* virtual address of dest application buffer */
682 unsigned long app_out_address;
683 /* virtual address of the data pool */
684 unsigned long va;
685 void *virt_address;
686 unsigned long num_bytes;
687 void *data_pool_area_addr;
688
689 dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
690
691 /* get the application address */
692 error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
693 if (error)
694 goto end_function;
695
696 /* get the virtual kernel address */
697 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
698 if (error)
699 goto end_function;
700 virt_address = (void *)va;
701
702 /* get the number of bytes */
703 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
704 if (error)
705 goto end_function;
706
707 /* calculate the start of the data pool */
708 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
709
710 /* FIXME: These are incomplete all over the driver: what about + len
711 and when doing that also overflows */
712 /* check that the range of the virtual kernel address is correct */
713 if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
714 error = -EINVAL;
715 goto end_function;
716 }
717
718 /* copy the application data */
719 error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
720 if (error)
721 error = -EFAULT;
722end_function:
723 dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
724 return error;
725}
726
727/*
728 This function releases all the application virtual buffer physical pages
729 that were previously locked
730*/
731static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
732{
733 unsigned long count;
734
735 if (dirtyFlag) {
736 for (count = 0; count < num_pages; count++) {
737 /* the out array was written, therefore the data was changed */
738 if (!PageReserved(page_array_ptr[count]))
739 SetPageDirty(page_array_ptr[count]);
740 page_cache_release(page_array_ptr[count]);
741 }
742 } else {
743 /* free in pages - the data was only read, therefore no update was done
744 on those pages */
745 for (count = 0; count < num_pages; count++)
746 page_cache_release(page_array_ptr[count]);
747 }
748
749 if (page_array_ptr)
750 /* free the array */
751 kfree(page_array_ptr);
752
753 return 0;
754}
755
756/*
757 This function locks all the physical pages of the kernel virtual buffer
758 and constructs a basic lli array, where each entry holds the physical
759 page address and the size of the data held in that physical page
760*/
761static int sep_lock_kernel_pages(struct sep_device *sep,
762 unsigned long kernel_virt_addr,
763 unsigned long data_size,
764 unsigned long *num_pages_ptr,
765 struct sep_lli_entry_t **lli_array_ptr,
766 struct page ***page_array_ptr)
767{
768 int error = 0;
769 /* the page of the end address of the kernel buffer */
770 unsigned long end_page;
771 /* the page of the start address of the kernel buffer */
772 unsigned long start_page;
773 /* the range in pages */
774 unsigned long num_pages;
775 struct sep_lli_entry_t *lli_array;
776 /* next kernel address to map */
777 unsigned long next_kernel_address;
778 unsigned long count;
779
780 dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
781
782 /* set start and end pages and num pages */
783 end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
784 start_page = kernel_virt_addr >> PAGE_SHIFT;
785 num_pages = end_page - start_page + 1;
786
787 edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
788 edbg("SEP Driver: data_size is %lu\n", data_size);
789 edbg("SEP Driver: start_page is %lx\n", start_page);
790 edbg("SEP Driver: end_page is %lx\n", end_page);
791 edbg("SEP Driver: num_pages is %lu\n", num_pages);
792
793 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
794 if (!lli_array) {
795 edbg("SEP Driver: kmalloc for lli_array failed\n");
796 error = -ENOMEM;
797 goto end_function;
798 }
799
800 /* set the start address of the first page - the data may not start at
801 the beginning of the page */
802 lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
803
804 /* check whether all the data fits in the first page */
805 if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
806 lli_array[0].block_size = data_size;
807 else
808 lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
809
810 /* debug print */
811 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
812
813 /* advance the address to the start of the next page */
814 next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
815
816 /* go from the second page to the one before the last */
817 for (count = 1; count < (num_pages - 1); count++) {
818 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
819 lli_array[count].block_size = PAGE_SIZE;
820
821 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
822 next_kernel_address += PAGE_SIZE;
823 }
824
825 /* if more than one page is locked, update the size needed for the last page */
826 if (num_pages > 1) {
827 /* update the address of the last page */
828 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
829
830 /* set the size of the last page */
831 lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
832
833 if (lli_array[count].block_size == 0) {
834 dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
835 dbg("data_size is %lu\n", data_size);
836 while (1);
837 }
838
839 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
840 }
841 /* set output params */
842 *lli_array_ptr = lli_array;
843 *num_pages_ptr = num_pages;
844 *page_array_ptr = 0;
845end_function:
846 dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
847 return error;
848}
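/*
 * Worked example for the page arithmetic above (illustrative only,
 * assuming PAGE_SIZE == 4096): for a buffer whose start address has a
 * 0xF00 byte offset into its page and data_size == 0x300, start_page
 * and end_page differ by one, so num_pages == 2; the first lli entry
 * then covers the 0x100 bytes left in the first page and the last
 * entry the remaining 0x200 bytes.
 */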
849
850/*
851 This function locks all the physical pages of the application virtual buffer
852 and construct a basic lli array, where each entry holds the physical page
853 address and the size that application data holds in this physical pages
854*/
855static int sep_lock_user_pages(struct sep_device *sep,
856 unsigned long app_virt_addr,
857 unsigned long data_size,
858 unsigned long *num_pages_ptr,
859 struct sep_lli_entry_t **lli_array_ptr,
860 struct page ***page_array_ptr)
861{
862 int error = 0;
863 /* the page of the end address of the user space buffer */
864 unsigned long end_page;
865 /* the page of the start address of the user space buffer */
866 unsigned long start_page;
867 /* the range in pages */
868 unsigned long num_pages;
869 struct page **page_array;
870 struct sep_lli_entry_t *lli_array;
871 unsigned long count;
872 int result;
873
874 dbg("SEP Driver:--------> sep_lock_user_pages start\n");
875
876 /* set start and end pages and num pages */
877 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
878 start_page = app_virt_addr >> PAGE_SHIFT;
879 num_pages = end_page - start_page + 1;
880
881 edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
882 edbg("SEP Driver: data_size is %lu\n", data_size);
883 edbg("SEP Driver: start_page is %lu\n", start_page);
884 edbg("SEP Driver: end_page is %lu\n", end_page);
885 edbg("SEP Driver: num_pages is %lu\n", num_pages);
886
887 /* allocate array of pages structure pointers */
888 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
889 if (!page_array) {
890 edbg("SEP Driver: kmalloc for page_array failed\n");
891
892 error = -ENOMEM;
893 goto end_function;
894 }
895
896 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
897 if (!lli_array) {
898 edbg("SEP Driver: kmalloc for lli_array failed\n");
899
900 error = -ENOMEM;
901 goto end_function_with_error1;
902 }
903
904 /* convert the application virtual address into a set of physical */
905 down_read(&current->mm->mmap_sem);
906 result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
907 up_read(&current->mm->mmap_sem);
908
909 /* check the number of pages locked - if not all then exit with error */
910 if (result != num_pages) {
911 dbg("SEP Driver: not all pages locked by get_user_pages\n");
912
913 error = -ENOMEM;
914 goto end_function_with_error2;
915 }
916
917 /* flush the cache */
918 for (count = 0; count < num_pages; count++)
919 flush_dcache_page(page_array[count]);
920
921 /* set the start address of the first page - app data may not start at
922 the beginning of the page */
923 lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
924
925 /* check whether all the data fits in the first page */
926 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
927 lli_array[0].block_size = data_size;
928 else
929 lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
930
931 /* debug print */
932 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
933
934 /* go from the second page to the one before the last */
935 for (count = 1; count < (num_pages - 1); count++) {
936 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
937 lli_array[count].block_size = PAGE_SIZE;
938
939 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
940 }
941
942 /* if more than one page is locked, update the size needed for the last page */
943 if (num_pages > 1) {
944 /* update the address of the last page */
945 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
946
947 /* set the size of the last page */
948 lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
949
950 if (lli_array[count].block_size == 0) {
951 dbg("app_virt_addr is %08lx\n", app_virt_addr);
952 dbg("data_size is %lu\n", data_size);
953 while (1);
954 }
955 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n",
956 count, lli_array[count].physical_address,
957 count, lli_array[count].block_size);
958 }
959
960 /* set output params */
961 *lli_array_ptr = lli_array;
962 *num_pages_ptr = num_pages;
963 *page_array_ptr = page_array;
964 goto end_function;
965
966end_function_with_error2:
967 /* release the pages */
968 for (count = 0; count < num_pages; count++)
969 page_cache_release(page_array[count]);
970 kfree(lli_array);
971end_function_with_error1:
972 kfree(page_array);
973end_function:
974 dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
975 return error;
976}
977
978
979/*
980 this function calculates the size of data that can be inserted into the lli
981 table from this array; the condition is that either the table is full
982 (all entries are used), or there are no more entries in the lli array
983*/
984static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
985{
986 unsigned long table_data_size = 0;
987 unsigned long counter;
988
989 /* calculate the data that goes into the out lli table, until either
990 the whole table is filled or the data has ended */
991 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
992 table_data_size += lli_in_array_ptr[counter].block_size;
993 return table_data_size;
994}
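/*
 * Worked example (illustrative only): for an lli array holding three
 * entries of 4096, 4096 and 100 bytes, and assuming
 * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1 >= 3, the function above
 * returns 4096 + 4096 + 100 = 8292 bytes.
 */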
995
996/*
997 this function builds one lli table from the lli_array according to
998 the given size of data
999*/
1000static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
1001{
1002 unsigned long curr_table_data_size;
1003 /* counter of lli array entry */
1004 unsigned long array_counter;
1005
1006 dbg("SEP Driver:--------> sep_build_lli_table start\n");
1007
1008 /* init current table data size and lli array entry counter */
1009 curr_table_data_size = 0;
1010 array_counter = 0;
1011 *num_table_entries_ptr = 1;
1012
1013 edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1014
1015 /* fill the table till table size reaches the needed amount */
1016 while (curr_table_data_size < table_data_size) {
1017 /* update the number of entries in table */
1018 (*num_table_entries_ptr)++;
1019
1020 lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
1021 lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
1022 curr_table_data_size += lli_table_ptr->block_size;
1023
1024 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1025 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1026 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1027
1028 /* check for overflow of the table data */
1029 if (curr_table_data_size > table_data_size) {
1030 edbg("SEP Driver:curr_table_data_size > table_data_size\n");
1031
1032 /* update the size of block in the table */
1033 lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
1034
1035 /* update the physical address in the lli array */
1036 lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
1037
1038 /* update the block size left in the lli array */
1039 lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
1040 } else
1041 /* advance to the next entry in the lli_array */
1042 array_counter++;
1043
1044 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1045 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1046
1047 /* move to the next entry in table */
1048 lli_table_ptr++;
1049 }
1050
1051 /* set the info entry to default */
1052 lli_table_ptr->physical_address = 0xffffffff;
1053 lli_table_ptr->block_size = 0;
1054
1055 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1056 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1057 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1058
1059 /* set the output parameter */
1060 *num_processed_entries_ptr += array_counter;
1061
1062 edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
1063 dbg("SEP Driver:<-------- sep_build_lli_table end\n");
1064 return;
1065}
1066
1067/*
1068 this function goes over the list of created tables and
1069 prints all the data
1070*/
1071static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
1072{
1073 unsigned long table_count;
1074 unsigned long entries_count;
1075
1076 dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
1077
1078 table_count = 1;
1079 while ((unsigned long) lli_table_ptr != 0xffffffff) {
1080 edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
1081 edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
1082
1083 /* print entries of the table (without info entry) */
1084 for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
1085 edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
1086 edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
1087 }
1088
1089 /* point to the info entry */
1090 lli_table_ptr--;
1091
1092 edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1093 edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1094
1095
1096 table_data_size = lli_table_ptr->block_size & 0xffffff;
1097 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1098 lli_table_ptr = (struct sep_lli_entry_t *)
1099 (lli_table_ptr->physical_address);
1100
1101 edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
1102
1103 if ((unsigned long) lli_table_ptr != 0xffffffff)
1104 lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);
1105
1106 table_count++;
1107 }
1108 dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
1109}
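/*
 * Illustrative sketch, not part of the original driver: the info entry
 * of each table packs the entry count and the table data size into a
 * single 32-bit block_size word, exactly as decoded above (count in the
 * top 8 bits, data size in the low 24 bits).  Hypothetical helpers that
 * make the encoding explicit:
 */
#if 0	/* example only */
#define SEP_INFO_ENTRY_PACK(entries, size)	(((entries) << 24) | ((size) & 0xffffff))
#define SEP_INFO_ENTRY_COUNT(word)		(((word) >> 24) & 0xff)
#define SEP_INFO_ENTRY_SIZE(word)		((word) & 0xffffff)
#endif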
1110
1111
1112/*
1113 This function prepares only the input DMA table for synchronous symmetric
1114 operations (HASH)
1115*/
1116static int sep_prepare_input_dma_table(struct sep_device *sep,
1117 unsigned long app_virt_addr,
1118 unsigned long data_size,
1119 unsigned long block_size,
1120 unsigned long *lli_table_ptr,
1121 unsigned long *num_entries_ptr,
1122 unsigned long *table_data_size_ptr,
1123 bool isKernelVirtualAddress)
1124{
1125 /* pointer to the info entry of the table - the last entry */
1126 struct sep_lli_entry_t *info_entry_ptr;
1127 /* lli array built from the locked pages */
1128 struct sep_lli_entry_t *lli_array_ptr;
1129 /* points to the first entry to be processed in the lli_in_array */
1130 unsigned long current_entry;
1131 /* num entries in the virtual buffer */
1132 unsigned long sep_lli_entries;
1133 /* lli table pointer */
1134 struct sep_lli_entry_t *in_lli_table_ptr;
1135 /* the total data in one table */
1136 unsigned long table_data_size;
1137 /* number of entries in lli table */
1138 unsigned long num_entries_in_table;
1139 /* next table address */
1140 void *lli_table_alloc_addr;
1141 unsigned long result;
1142
1143 dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
1144
1145 edbg("SEP Driver:data_size is %lu\n", data_size);
1146 edbg("SEP Driver:block_size is %lu\n", block_size);
1147
1148 /* initialize the pages pointers */
1149 sep->in_page_array = 0;
1150 sep->in_num_pages = 0;
1151
1152 if (data_size == 0) {
1153 /* special case - created 2 entries table with zero data */
1154 in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
1155 /* FIXME: Should the entry below not be for _bus */
1156 in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1157 in_lli_table_ptr->block_size = 0;
1158
1159 in_lli_table_ptr++;
1160 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
1161 in_lli_table_ptr->block_size = 0;
1162
1163 *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1164 *num_entries_ptr = 2;
1165 *table_data_size_ptr = 0;
1166
1167 goto end_function;
1168 }
1169
1170 /* check if the pages are in Kernel Virtual Address layout */
1171 if (isKernelVirtualAddress == true)
1172 /* lock the pages of the kernel buffer and translate them to pages */
1173 result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1174 else
1175 /* lock the pages of the user buffer and translate them to pages */
1176 result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1177
1178 if (result)
1179 return result;
1180
1181 edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
1182
1183 current_entry = 0;
1184 info_entry_ptr = 0;
1185 sep_lli_entries = sep->in_num_pages;
1186
1187 /* initialize to point after the message area */
1188 lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1189
1190 /* loop until all the entries in the input array have been processed */
1191 while (current_entry < sep_lli_entries) {
1192 /* set the new input and output tables */
1193 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1194
1195 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1196
1197 /* calculate the maximum size of data for input table */
1198 table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
1199
1200 /* now round the table size down to a multiple of the block size */
1201 table_data_size = (table_data_size / block_size) * block_size;
1202
1203 edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
1204
1205 /* construct input lli table */
1206 sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
1207
1208 if (info_entry_ptr == 0) {
1209 /* set the output parameters to physical addresses */
1210 *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1211 *num_entries_ptr = num_entries_in_table;
1212 *table_data_size_ptr = table_data_size;
1213
1214 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
1215 } else {
1216 /* update the info entry of the previous in table */
1217 info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1218 info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1219 }
1220
1221 /* save the pointer to the info entry of the current tables */
1222 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1223 }
1224
1225 /* print input tables */
1226 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1227 sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
1228
1229 /* the array of the pages */
1230 kfree(lli_array_ptr);
1231end_function:
1232 dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
1233 return 0;
1234
1235}
1236
1237/*
1238 This function creates the input and output dma tables for
1239 symmetric operations (AES/DES) according to the block size from LLI arrays
1240*/
1241static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
1242 struct sep_lli_entry_t *lli_in_array,
1243 unsigned long sep_in_lli_entries,
1244 struct sep_lli_entry_t *lli_out_array,
1245 unsigned long sep_out_lli_entries,
1246 unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
1247{
1248 /* points to the area where next lli table can be allocated: keep void *
1249 as there is pointer scaling to fix otherwise */
1250 void *lli_table_alloc_addr;
1251 /* input lli table */
1252 struct sep_lli_entry_t *in_lli_table_ptr;
1253 /* output lli table */
1254 struct sep_lli_entry_t *out_lli_table_ptr;
1255 /* pointer to the info entry of the table - the last entry */
1256 struct sep_lli_entry_t *info_in_entry_ptr;
1257 /* pointer to the info entry of the table - the last entry */
1258 struct sep_lli_entry_t *info_out_entry_ptr;
1259 /* points to the first entry to be processed in the lli_in_array */
1260 unsigned long current_in_entry;
1261 /* points to the first entry to be processed in the lli_out_array */
1262 unsigned long current_out_entry;
1263 /* max size of the input table */
1264 unsigned long in_table_data_size;
1265 /* max size of the output table */
1266 unsigned long out_table_data_size;
1267 /* flag that signifies whether this is the first table built from the arrays */
1268 unsigned long first_table_flag;
1269 /* the data size that should be in table */
1270 unsigned long table_data_size;
1271 /* number of entries in the input table */
1272 unsigned long num_entries_in_table;
1273 /* number of entries in the output table */
1274 unsigned long num_entries_out_table;
1275
1276 dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
1277
1278 /* initialize to point after the message area */
1279 lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1280
1281 current_in_entry = 0;
1282 current_out_entry = 0;
1283 first_table_flag = 1;
1284 info_in_entry_ptr = 0;
1285 info_out_entry_ptr = 0;
1286
1287 /* loop until all the entries in the input array have been processed */
1288 while (current_in_entry < sep_in_lli_entries) {
1289 /* set the new input and output tables */
1290 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1291
1292 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1293
1294 /* set the first output tables */
1295 out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1296
1297 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1298
1299 /* calculate the maximum size of data for input table */
1300 in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
1301
1302 /* calculate the maximum size of data for output table */
1303 out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
1304
1305 edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
1306 edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
1307
1308 /* use the smaller of the two data sizes */
1309 table_data_size = in_table_data_size;
1310 if (table_data_size > out_table_data_size)
1311 table_data_size = out_table_data_size;
1312
1313 /* now round the table size down to a multiple of the block size */
1314 table_data_size = (table_data_size / block_size) * block_size;
1315
1316 dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1317
1318 /* construct input lli table */
1319 sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
1320
1321 /* construct output lli table */
1322 sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
1323
1324 /* if info entry is null - this is the first table built */
1325 if (info_in_entry_ptr == 0) {
1326 /* set the output parameters to physical addresses */
1327 *lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1328 *in_num_entries_ptr = num_entries_in_table;
1329 *lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
1330 *out_num_entries_ptr = num_entries_out_table;
1331 *table_data_size_ptr = table_data_size;
1332
1333 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
1334 edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
1335 } else {
1336 /* update the info entry of the previous in table */
1337 info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1338 info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1339
1340 /* update the info entry of the previous out table */
1341 info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
1342 info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
1343 }
1344
1345 /* save the pointer to the info entry of the current tables */
1346 info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1347 info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
1348
1349 edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
1350 edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
1351 edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
1352 }
1353
1354 /* print input tables */
1355 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1356 sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
1357 /* print output tables */
1358 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1359 sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
1360 dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
1361 return 0;
1362}
1363
1364
1365/*
1366 This function builds input and output DMA tables for synchronous
1367 symmetric operations (AES, DES). It also checks that each table
1368 holds a data size that is a multiple of the block size
1369*/
1370static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1371 unsigned long app_virt_in_addr,
1372 unsigned long app_virt_out_addr,
1373 unsigned long data_size,
1374 unsigned long block_size,
1375 unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
1376{
1377 /* array of pointers of page */
1378 struct sep_lli_entry_t *lli_in_array;
1379 /* array of pointers of page */
1380 struct sep_lli_entry_t *lli_out_array;
1381 int result = 0;
1382
1383 dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
1384
1385 /* initialize the pages pointers */
1386 sep->in_page_array = 0;
1387 sep->out_page_array = 0;
1388
1389 /* check if the pages are in Kernel Virtual Address layout */
1390 if (isKernelVirtualAddress == true) {
1391 /* lock the pages of the kernel buffer and translate them to pages */
1392 result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1393 if (result) {
1394 edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
1395 goto end_function;
1396 }
1397 } else {
1398 /* lock the pages of the user buffer and translate them to pages */
1399 result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1400 if (result) {
1401 edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
1402 goto end_function;
1403 }
1404 }
1405
1406 if (isKernelVirtualAddress == true) {
1407 result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1408 if (result) {
1409 edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
1410 goto end_function_with_error1;
1411 }
1412 } else {
1413 result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1414 if (result) {
1415 edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
1416 goto end_function_with_error1;
1417 }
1418 }
1419 edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
1420 edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
1421 edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1422
1423
1424 /* call the function that creates the tables from the lli arrays */
1425 result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1426 if (result) {
1427 edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
1428 goto end_function_with_error2;
1429 }
1430
1431 /* fall through - free the lli entry arrays */
1432 dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
1433 dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
1434 dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
1435end_function_with_error2:
1436 kfree(lli_out_array);
1437end_function_with_error1:
1438 kfree(lli_in_array);
1439end_function:
1440 dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
1441 return result;
1442
1443}
1444
1445/*
1446 this function handles the request for creation of the DMA tables
1447 for the synchronous symmetric operations (AES, DES)
1448*/
1449static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
1450 unsigned long arg)
1451{
1452 int error;
1453 /* command arguments */
1454 struct sep_driver_build_sync_table_t command_args;
1455
1456 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1457
1458 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1459 if (error) {
1460 error = -EFAULT;
1461 goto end_function;
1462 }
1463
1464 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1465 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1466 edbg("data_size is %lu\n", command_args.data_in_size);
1467 edbg("block_size is %lu\n", command_args.block_size);
1468
1469 /* check if we need to build only input table or input/output */
1470 if (command_args.app_out_address)
1471 /* prepare input and output tables */
1472 error = sep_prepare_input_output_dma_table(sep,
1473 command_args.app_in_address,
1474 command_args.app_out_address,
1475 command_args.data_in_size,
1476 command_args.block_size,
1477 &command_args.in_table_address,
1478 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1479 else
1480 /* prepare input tables */
1481 error = sep_prepare_input_dma_table(sep,
1482 command_args.app_in_address,
1483 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1484
1485 if (error)
1486 goto end_function;
1487 /* copy to user */
1488 if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
1489 error = -EFAULT;
1490end_function:
1491 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1492 return error;
1493}
1494
1495/*
1496 this function handles the request to free the dma tables used for synchronous operations
1497*/
1498static int sep_free_dma_table_data_handler(struct sep_device *sep)
1499{
1500 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1501
1502 /* free input pages array */
1503 sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
1504
1505 /* free output pages array if needed */
1506 if (sep->out_page_array)
1507 sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
1508
1509 /* reset all the values */
1510 sep->in_page_array = 0;
1511 sep->out_page_array = 0;
1512 sep->in_num_pages = 0;
1513 sep->out_num_pages = 0;
1514 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1515 return 0;
1516}
1517
1518/*
1519 this function finds space for a new flow dma table
1520*/
1521static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
1522 unsigned long **table_address_ptr)
1523{
1524 int error = 0;
1525 /* pointer to the id field of the flow dma table */
1526 unsigned long *start_table_ptr;
1527 /* Do not make start_addr unsigned long * unless fixing the offset
1528 computations ! */
1529 void *flow_dma_area_start_addr;
1530 unsigned long *flow_dma_area_end_addr;
1531 /* maximum table size in words */
1532 unsigned long table_size_in_words;
1533
1534 /* find the start address of the flow DMA table area */
1535 flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1536
1537 /* set end address of the flow table area */
1538 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
1539
1540 /* set table size in words */
1541 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1542
1543 /* set the pointer to the start address of DMA area */
1544 start_table_ptr = flow_dma_area_start_addr;
1545
1546 /* find the space for the next table */
1547 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
1548 start_table_ptr += table_size_in_words;
1549
1550 /* check if we reached the end of the flow tables area */
1551 if (start_table_ptr >= flow_dma_area_end_addr)
1552 error = -1;
1553 else
1554 *table_address_ptr = start_table_ptr;
1555
1556 return error;
1557}
1558
1559/*
1560 This function creates one DMA table for flow and returns its data,
1561 and a pointer to its info entry
1562*/
1563static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
1564 unsigned long virt_buff_addr,
1565 unsigned long virt_buff_size,
1566 struct sep_lli_entry_t *table_data,
1567 struct sep_lli_entry_t **info_entry_ptr,
1568 struct sep_flow_context_t *flow_data_ptr,
1569 bool isKernelVirtualAddress)
1570{
1571 int error;
1572 /* the range in pages */
1573 unsigned long lli_array_size;
1574 struct sep_lli_entry_t *lli_array;
1575 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
1576 unsigned long *start_dma_table_ptr;
1577 /* total table data counter */
1578 unsigned long dma_table_data_count;
1579 /* pointer that will keep the pointer to the pages of the virtual buffer */
1580 struct page **page_array_ptr;
1581 unsigned long entry_count;
1582
1583 /* find the space for the new table */
1584 error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
1585 if (error)
1586 goto end_function;
1587
1588 /* check if the pages are in Kernel Virtual Address layout */
1589 if (isKernelVirtualAddress == true)
1590 /* lock kernel buffer in the memory */
1591 error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1592 else
1593 /* lock user buffer in the memory */
1594 error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1595
1596 if (error)
1597 goto end_function;
1598
1599 /* set the number of locked pages at the beginning of the table - this table is
1600 now considered taken */
1601 *start_dma_table_ptr = lli_array_size;
1602
1603 /* point to the place of the pages pointers of the table */
1604 start_dma_table_ptr++;
1605
1606 /* set the pages pointer */
1607 *start_dma_table_ptr = (unsigned long) page_array_ptr;
1608
1609 /* set the pointer to the first entry */
1610 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
1611
1612 /* now create the entries for table */
1613 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
1614 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
1615
1616 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
1617
1618 /* set the total data of a table */
1619 dma_table_data_count += lli_array[entry_count].block_size;
1620
1621 flow_dma_table_entry_ptr++;
1622 }
1623
1624 /* set the physical address */
1625 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
1626
1627 /* set the num_entries and total data size */
1628 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
1629
1630 /* set the info entry */
1631 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
1632 flow_dma_table_entry_ptr->block_size = 0;
1633
1634 /* set the pointer to info entry */
1635 *info_entry_ptr = flow_dma_table_entry_ptr;
1636
1637 /* the array of the lli entries */
1638 kfree(lli_array);
1639end_function:
1640 return error;
1641}
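/*
 * Flow table layout produced above (reference only): the first word of
 * the table holds the number of locked pages, the second the kernel
 * pointer to the page array, followed by the lli entries and a
 * terminating info entry whose physical_address is 0xffffffff.
 * sep_deallocated_flow_tables() below relies on this layout when it
 * reads *(table_ptr - 2) and *(table_ptr - 1).
 */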
1642
1643
1644
1645/*
1646 This function creates a list of tables for flow and returns the data for
1647 the first and last tables of the list
1648*/
1649static int sep_prepare_flow_dma_tables(struct sep_device *sep,
1650 unsigned long num_virtual_buffers,
1651 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
1652{
1653 int error;
1654 unsigned long virt_buff_addr;
1655 unsigned long virt_buff_size;
1656 struct sep_lli_entry_t table_data;
1657 struct sep_lli_entry_t *info_entry_ptr;
1658 struct sep_lli_entry_t *prev_info_entry_ptr;
1659 unsigned long i;
1660
1661 /* init vars */
1662 error = 0;
1663 prev_info_entry_ptr = 0;
1664
1665 /* init the first table to default */
1666 table_data.physical_address = 0xffffffff;
1667 first_table_data_ptr->physical_address = 0xffffffff;
1668 table_data.block_size = 0;
1669
1670 for (i = 0; i < num_virtual_buffers; i++) {
1671 /* get the virtual buffer address */
1672 error = get_user(virt_buff_addr, &first_buff_addr);
1673 if (error)
1674 goto end_function;
1675
1676 /* get the virtual buffer size */
1677 first_buff_addr++;
1678 error = get_user(virt_buff_size, &first_buff_addr);
1679 if (error)
1680 goto end_function;
1681
1682 /* advance the address to point to the next pair of address|size */
1683 first_buff_addr++;
1684
1685 /* now prepare the one flow LLI table from the data */
1686 error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
1687 if (error)
1688 goto end_function;
1689
1690 if (i == 0) {
1691 /* if this is the first table - save it to return to the user
1692 application */
1693 *first_table_data_ptr = table_data;
1694
1695 /* set the pointer to info entry */
1696 prev_info_entry_ptr = info_entry_ptr;
1697 } else {
1698 /* not first table - the previous table info entry should
1699 be updated */
1700 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
1701
1702 /* set the pointer to info entry */
1703 prev_info_entry_ptr = info_entry_ptr;
1704 }
1705 }
1706
1707 /* set the last table data */
1708 *last_table_data_ptr = table_data;
1709end_function:
1710 return error;
1711}
1712
1713/*
1714 this function goes over all the flow tables connected to the given
1715 table and deallocates them
1716*/
1717static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
1718{
1719	/* pointer to the current table */
1720 unsigned long *table_ptr;
1721	/* number of entries in the current table */
1722 unsigned long num_entries;
1723 unsigned long num_pages;
1724 struct page **pages_ptr;
1725	/* pointer to the info entry of the current table */
1726 struct sep_lli_entry_t *info_entry_ptr;
1727
1728 /* set the pointer to the first table */
1729 table_ptr = (unsigned long *) first_table_ptr->physical_address;
1730
1731 /* set the num of entries */
1732 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
1733 & SEP_NUM_ENTRIES_MASK;
1734
1735 /* go over all the connected tables */
1736 while (*table_ptr != 0xffffffff) {
1737 /* get number of pages */
1738 num_pages = *(table_ptr - 2);
1739
1740 /* get the pointer to the pages */
1741 pages_ptr = (struct page **) (*(table_ptr - 1));
1742
1743 /* free the pages */
1744 sep_free_dma_pages(pages_ptr, num_pages, 1);
1745
1746		/* go to the info entry */
1747 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
1748
1749 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
1750 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1751 }
1752
1753 return;
1754}
1755
1756/**
1757 * sep_find_flow_context - find a flow
1758 * @sep: the SEP we are working with
1759 * @flow_id: flow identifier
1760 *
1761 * Returns a pointer to the matching flow, or NULL if the flow does not
1762 * exist.
1763 */
1764
1765static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1766 unsigned long flow_id)
1767{
1768 int count;
1769 /*
1770	 * always search for the flow with the default id first - once we
1771	 * have started working on a flow there can never be two flows
1772	 * carrying the default flag at the same time
1773 */
1774 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1775 if (sep->flows[count].flow_id == flow_id)
1776 return &sep->flows[count];
1777 }
1778 return NULL;
1779}
1780
1781
1782/*
1783 this function handles the request to create the DMA tables for flow
1784*/
1785static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1786 unsigned long arg)
1787{
1788 int error = -ENOENT;
1789 struct sep_driver_build_flow_table_t command_args;
1790 /* first table - output */
1791 struct sep_lli_entry_t first_table_data;
1792 /* dma table data */
1793 struct sep_lli_entry_t last_table_data;
1794	/* pointer to the info entry of the previous DMA table */
1795 struct sep_lli_entry_t *prev_info_entry_ptr;
1796	/* pointer to the flow data structure */
1797 struct sep_flow_context_t *flow_context_ptr;
1798
1799 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1800
1801 /* init variables */
1802 prev_info_entry_ptr = 0;
1803 first_table_data.physical_address = 0xffffffff;
1804
1805 /* find the free structure for flow data */
1806 error = -EINVAL;
1807 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1808 if (flow_context_ptr == NULL)
1809 goto end_function;
1810
1811 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1812 if (error) {
1813 error = -EFAULT;
1814 goto end_function;
1815 }
1816
1817 /* create flow tables */
1818 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1819 if (error)
1820 goto end_function_with_error;
1821
1822 /* check if flow is static */
1823 if (!command_args.flow_type)
1824 /* point the info entry of the last to the info entry of the first */
1825 last_table_data = first_table_data;
1826
1827 /* set output params */
1828 command_args.first_table_addr = first_table_data.physical_address;
1829 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1830 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1831
1832 /* send the parameters to user application */
1833 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1834 if (error) {
1835 error = -EFAULT;
1836 goto end_function_with_error;
1837 }
1838
1839 /* all the flow created - update the flow entry with temp id */
1840 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1841
1842 /* set the processing tables data in the context */
1843 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1844 flow_context_ptr->input_tables_in_process = first_table_data;
1845 else
1846 flow_context_ptr->output_tables_in_process = first_table_data;
1847
1848 goto end_function;
1849
1850end_function_with_error:
1851 /* free the allocated tables */
1852 sep_deallocated_flow_tables(&first_table_data);
1853end_function:
1854 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1855 return error;
1856}
1857
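
For context, a hedged user-space sketch of driving this handler through the ioctl interface declared in sep_driver_api.h below. The open file descriptor and device node are assumptions (the driver registers a cdev named sep_sec_driver but does not itself create a device node); buf_list is laid out as the address/size word pairs that sep_prepare_flow_dma_tables() walks:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
/* plus the SEP ioctl and structure definitions from sep_driver_api.h */

static int build_input_flow_tables(int fd, unsigned long *buf_list,
				   unsigned long num_buffers)
{
	struct sep_driver_build_flow_table_t args = {
		.flow_type              = 0,                  /* 0 = static flow */
		.input_output_flag      = SEP_DRIVER_IN_FLAG, /* input tables */
		.virt_buff_data_addr    = (unsigned long) buf_list,
		.num_virtual_buffers    = num_buffers,
		.isKernelVirtualAddress = 0,
	};

	/* buf_list holds num_buffers (address, size) word pairs */
	if (ioctl(fd, SEP_IOCCREATEFLOWDMATABLE, &args) < 0)
		return -1;

	printf("first table at %#lx, %lu entries, %lu data bytes\n",
	       args.first_table_addr, args.first_table_num_entries,
	       args.first_table_data_size);
	return 0;
}
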
1858/*
1859 this function handles adding tables to a flow
1860*/
1861static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1862{
1863 int error;
1864 unsigned long num_entries;
1865 struct sep_driver_add_flow_table_t command_args;
1866 struct sep_flow_context_t *flow_context_ptr;
1867 /* first dma table data */
1868 struct sep_lli_entry_t first_table_data;
1869 /* last dma table data */
1870 struct sep_lli_entry_t last_table_data;
1871 /* pointer to the info entry of the current DMA table */
1872 struct sep_lli_entry_t *info_entry_ptr;
1873
1874 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1875
1876 /* get input parameters */
1877 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1878 if (error) {
1879 error = -EFAULT;
1880 goto end_function;
1881 }
1882
1883 /* find the flow structure for the flow id */
1884 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1885 if (flow_context_ptr == NULL)
1886 goto end_function;
1887
1888 /* prepare the flow dma tables */
1889 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1890 if (error)
1891 goto end_function_with_error;
1892
1893 /* now check if there is already an existing add table for this flow */
1894 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1895 /* this buffer was for input buffers */
1896 if (flow_context_ptr->input_tables_flag) {
1897 /* add table already exists - add the new tables to the end
1898 of the previous */
1899 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1900
1901 info_entry_ptr = (struct sep_lli_entry_t *)
1902 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1903
1904 /* connect to list of tables */
1905 *info_entry_ptr = first_table_data;
1906
1907 /* set the first table data */
1908 first_table_data = flow_context_ptr->first_input_table;
1909 } else {
1910 /* set the input flag */
1911 flow_context_ptr->input_tables_flag = 1;
1912
1913 /* set the first table data */
1914 flow_context_ptr->first_input_table = first_table_data;
1915 }
1916 /* set the last table data */
1917 flow_context_ptr->last_input_table = last_table_data;
1918 } else { /* this is output tables */
1919
1920		/* this buffer was for output buffers */
1921 if (flow_context_ptr->output_tables_flag) {
1922 /* add table already exists - add the new tables to
1923 the end of the previous */
1924 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1925
1926 info_entry_ptr = (struct sep_lli_entry_t *)
1927 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1928
1929 /* connect to list of tables */
1930 *info_entry_ptr = first_table_data;
1931
1932 /* set the first table data */
1933 first_table_data = flow_context_ptr->first_output_table;
1934 } else {
1935			/* set the output flag */
1936 flow_context_ptr->output_tables_flag = 1;
1937
1938 /* set the first table data */
1939 flow_context_ptr->first_output_table = first_table_data;
1940 }
1941 /* set the last table data */
1942 flow_context_ptr->last_output_table = last_table_data;
1943 }
1944
1945 /* set output params */
1946 command_args.first_table_addr = first_table_data.physical_address;
1947 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1948 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1949
1950 /* send the parameters to user application */
1951 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1952 if (error)
1953 error = -EFAULT;
1954end_function_with_error:
1955 /* free the allocated tables */
1956 sep_deallocated_flow_tables(&first_table_data);
1957end_function:
1958 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1959 return error;
1960}
1961
1962/*
1963 this function adds the flow add message to the specific flow
1964*/
1965static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1966{
1967 int error;
1968 struct sep_driver_add_message_t command_args;
1969 struct sep_flow_context_t *flow_context_ptr;
1970
1971 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1972
1973 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
1974 if (error) {
1975 error = -EFAULT;
1976 goto end_function;
1977 }
1978
1979 /* check input */
1980 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1981 error = -ENOMEM;
1982 goto end_function;
1983 }
1984
1985 /* find the flow context */
1986 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1987 if (flow_context_ptr == NULL)
1988 goto end_function;
1989
1990 /* copy the message into context */
1991 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1992 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1993 if (error)
1994 error = -EFAULT;
1995end_function:
1996 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1997 return error;
1998}
1999
2000
2001/*
2002 this function returns the bus and virtual addresses of the static pool
2003*/
2004static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
2005{
2006 int error;
2007 struct sep_driver_static_pool_addr_t command_args;
2008
2009 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
2010
2011 /*prepare the output parameters in the struct */
2012 command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2013 command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2014
2015 edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
2016
2017 /* send the parameters to user application */
2018 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
2019 if (error)
2020 error = -EFAULT;
2021 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
2022 return error;
2023}
2024
2025/*
2026 this function returns the offset of the physical address from the start
2027 of the mapped area
2028*/
2029static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
2030{
2031 int error;
2032 struct sep_driver_get_mapped_offset_t command_args;
2033
2034 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2035
2036 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
2037 if (error) {
2038 error = -EFAULT;
2039 goto end_function;
2040 }
2041
2042 if (command_args.physical_address < sep->shared_bus) {
2043 error = -EINVAL;
2044 goto end_function;
2045 }
2046
2047 /*prepare the output parameters in the struct */
2048 command_args.offset = command_args.physical_address - sep->shared_bus;
2049
2050 edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2051
2052 /* send the parameters to user application */
2053 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2054 if (error)
2055 error = -EFAULT;
2056end_function:
2057 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
2058 return error;
2059}
2060
2061
2062/*
2063 this function handles the SEP start request
2064*/
2065static int sep_start_handler(struct sep_device *sep)
2066{
2067 unsigned long reg_val;
2068 unsigned long error = 0;
2069
2070 dbg("SEP Driver:--------> sep_start_handler start\n");
2071
2072 /* wait in polling for message from SEP */
2073 do
2074 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2075 while (!reg_val);
2076
2077 /* check the value */
2078 if (reg_val == 0x1)
2079		/* fatal error - read error status from GPR0 */
2080 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2081 dbg("SEP Driver:<-------- sep_start_handler end\n");
2082 return error;
2083}
2084
2085/*
2086 this function handles the request for SEP initialization
2087*/
2088static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2089{
2090 unsigned long message_word;
2091 unsigned long *message_ptr;
2092 struct sep_driver_init_t command_args;
2093 unsigned long counter;
2094 unsigned long error;
2095 unsigned long reg_val;
2096
2097 dbg("SEP Driver:--------> sep_init_handler start\n");
2098 error = 0;
2099
2100 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2101 if (error) {
2102 error = -EFAULT;
2103 goto end_function;
2104 }
2105 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user\n");
2106
2107	/* PATCH - configure the DMA to single-burst instead of multi-burst */
2108 /*sep_configure_dma_burst(); */
2109
2110 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
2111
2112 message_ptr = (unsigned long *) command_args.message_addr;
2113
2114 /* set the base address of the SRAM */
2115 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2116
2117 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
2118 get_user(message_word, message_ptr);
2119 /* write data to SRAM */
2120 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2121 edbg("SEP Driver:message_word is %lu\n", message_word);
2122 /* wait for write complete */
2123 sep_wait_sram_write(sep);
2124 }
2125 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
2126 /* signal SEP */
2127 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2128
2129 do
2130 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2131 while (!(reg_val & 0xFFFFFFFD));
2132
2133 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2134
2135 /* check the value */
2136 if (reg_val == 0x1) {
2137 edbg("SEP Driver:init failed\n");
2138
2139 error = sep_read_reg(sep, 0x8060);
2140 edbg("SEP Driver:sw monitor is %lu\n", error);
2141
2142		/* fatal error - read error status from GPR0 */
2143 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2144 edbg("SEP Driver:error is %lu\n", error);
2145 }
2146end_function:
2147 dbg("SEP Driver:<-------- sep_init_handler end\n");
2148 return error;
2149
2150}
2151
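
A hedged sketch of the user-space side of this handler: the caller points message_addr at an array of message_size_in_words words, which the loop above copies word by word into SEP SRAM. The init message contents are firmware-specific and the open file descriptor fd is assumed:

	unsigned long init_msg[16];    /* firmware-specific init words, assumed here */
	struct sep_driver_init_t init_args = {
		.message_addr          = (unsigned long) init_msg,
		.message_size_in_words = 16,
	};

	if (ioctl(fd, SEP_IOCSEPINIT, &init_args) < 0)
		perror("SEP_IOCSEPINIT");
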
2152/*
2153 this function handles the cache and resident reallocation request
2154*/
2155static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2156 unsigned long arg)
2157{
2158 struct sep_driver_realloc_cache_resident_t command_args;
2159 int error;
2160
2161	/* copy cache and resident to their intended locations */
2162 error = sep_load_firmware(sep);
2163 if (error)
2164 return error;
2165
2166 command_args.new_base_addr = sep->shared_bus;
2167
2168 /* find the new base address according to the lowest address between
2169 cache, resident and shared area */
2170 if (sep->resident_bus < command_args.new_base_addr)
2171 command_args.new_base_addr = sep->resident_bus;
2172 if (sep->rar_bus < command_args.new_base_addr)
2173 command_args.new_base_addr = sep->rar_bus;
2174
2175 /* set the return parameters */
2176 command_args.new_cache_addr = sep->rar_bus;
2177 command_args.new_resident_addr = sep->resident_bus;
2178
2179 /* set the new shared area */
2180 command_args.new_shared_area_addr = sep->shared_bus;
2181
2182 edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
2183 edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
2184 edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
2185 edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
2186
2187 /* return to user */
2188 if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
2189 return -EFAULT;
2190 return 0;
2191}
2192
2193/**
2194 * sep_get_time_handler - time request from user space
2195 * @sep: sep we are to set the time for
2196 * @arg: pointer to user space arg buffer
2197 *
2198 * This function reports back the time and the address in the SEP
2199 * shared buffer at which it has been placed. (Do we really need this!!!)
2200 */
2201
2202static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2203{
2204 struct sep_driver_get_time_t command_args;
2205
2206 mutex_lock(&sep_mutex);
2207 command_args.time_value = sep_set_time(sep);
2208 command_args.time_physical_address = (unsigned long)sep_time_address(sep);
2209 mutex_unlock(&sep_mutex);
2210 if (copy_to_user((void __user *)arg,
2211 &command_args, sizeof(struct sep_driver_get_time_t)))
2212 return -EFAULT;
2213 return 0;
2214
2215}
2216
2217/*
2218 This API handles the end transaction request
2219*/
2220static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
2221{
2222 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
2223
2224#if 0 /*!SEP_DRIVER_POLLING_MODE */
2225 /* close IMR */
2226 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2227
2228 /* release IRQ line */
2229 free_irq(SEP_DIRVER_IRQ_NUM, sep);
2230
2231 /* lock the sep mutex */
2232 mutex_unlock(&sep_mutex);
2233#endif
2234
2235 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
2236
2237 return 0;
2238}
2239
2240
2241/**
2242 * sep_set_flow_id_handler - handle flow setting
2243 * @sep: the SEP we are configuring
2244 * @flow_id: the flow we are setting
2245 *
2246 * This function handles the set flow id command
2247 */
2248static int sep_set_flow_id_handler(struct sep_device *sep,
2249 unsigned long flow_id)
2250{
2251 int error = 0;
2252 struct sep_flow_context_t *flow_data_ptr;
2253
2254 /* find the flow data structure that was just used for creating new flow
2255 - its id should be default */
2256
2257 mutex_lock(&sep_mutex);
2258 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
2259 if (flow_data_ptr)
2260 flow_data_ptr->flow_id = flow_id; /* set flow id */
2261 else
2262 error = -EINVAL;
2263 mutex_unlock(&sep_mutex);
2264 return error;
2265}
2266
2267static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2268{
2269 int error = 0;
2270 struct sep_device *sep = filp->private_data;
2271
2272 dbg("------------>SEP Driver: ioctl start\n");
2273
2274 edbg("SEP Driver: cmd is %x\n", cmd);
2275
2276 switch (cmd) {
2277 case SEP_IOCSENDSEPCOMMAND:
2278 /* send command to SEP */
2279 sep_send_command_handler(sep);
2280 edbg("SEP Driver: after sep_send_command_handler\n");
2281 break;
2282 case SEP_IOCSENDSEPRPLYCOMMAND:
2283 /* send reply command to SEP */
2284 sep_send_reply_command_handler(sep);
2285 break;
2286 case SEP_IOCALLOCDATAPOLL:
2287 /* allocate data pool */
2288 error = sep_allocate_data_pool_memory_handler(sep, arg);
2289 break;
2290 case SEP_IOCWRITEDATAPOLL:
2291 /* write data into memory pool */
2292 error = sep_write_into_data_pool_handler(sep, arg);
2293 break;
2294 case SEP_IOCREADDATAPOLL:
2295 /* read data from data pool into application memory */
2296 error = sep_read_from_data_pool_handler(sep, arg);
2297 break;
2298 case SEP_IOCCREATESYMDMATABLE:
2299		/* create dma table for synchronous operation */
2300 error = sep_create_sync_dma_tables_handler(sep, arg);
2301 break;
2302 case SEP_IOCCREATEFLOWDMATABLE:
2303 /* create flow dma tables */
2304 error = sep_create_flow_dma_tables_handler(sep, arg);
2305 break;
2306 case SEP_IOCFREEDMATABLEDATA:
2307 /* free the pages */
2308 error = sep_free_dma_table_data_handler(sep);
2309 break;
2310 case SEP_IOCSETFLOWID:
2311 /* set flow id */
2312 error = sep_set_flow_id_handler(sep, (unsigned long)arg);
2313 break;
2314 case SEP_IOCADDFLOWTABLE:
2315 /* add tables to the dynamic flow */
2316 error = sep_add_flow_tables_handler(sep, arg);
2317 break;
2318 case SEP_IOCADDFLOWMESSAGE:
2319 /* add message of add tables to flow */
2320 error = sep_add_flow_tables_message_handler(sep, arg);
2321 break;
2322 case SEP_IOCSEPSTART:
2323 /* start command to sep */
2324 error = sep_start_handler(sep);
2325 break;
2326 case SEP_IOCSEPINIT:
2327 /* init command to sep */
2328 error = sep_init_handler(sep, arg);
2329 break;
2330 case SEP_IOCGETSTATICPOOLADDR:
2331 /* get the physical and virtual addresses of the static pool */
2332 error = sep_get_static_pool_addr_handler(sep, arg);
2333 break;
2334 case SEP_IOCENDTRANSACTION:
2335 error = sep_end_transaction_handler(sep, arg);
2336 break;
2337 case SEP_IOCREALLOCCACHERES:
2338 error = sep_realloc_cache_resident_handler(sep, arg);
2339 break;
2340 case SEP_IOCGETMAPPEDADDROFFSET:
2341 error = sep_get_physical_mapped_offset_handler(sep, arg);
2342 break;
2343 case SEP_IOCGETIME:
2344 error = sep_get_time_handler(sep, arg);
2345 break;
2346 default:
2347 error = -ENOTTY;
2348 break;
2349 }
2350 dbg("SEP Driver:<-------- ioctl end\n");
2351 return error;
2352}
2353
2354
2355
2356#if !SEP_DRIVER_POLLING_MODE
2357
2358/* handler for flow done interrupt */
2359
2360static void sep_flow_done_handler(struct work_struct *work)
2361{
2362 struct sep_flow_context_t *flow_data_ptr;
2363
2364 /* obtain the mutex */
2365 mutex_lock(&sep_mutex);
2366
2367 /* get the pointer to context */
2368 flow_data_ptr = (struct sep_flow_context_t *) work;
2369
2370 /* free all the current input tables in sep */
2371 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2372
2373	/* free all the current output tables in SEP (if needed) */
2374 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2375 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2376
2377	/* check if we have additional tables to be sent to SEP; only the
2378	   input flag needs to be checked */
2379 if (flow_data_ptr->input_tables_flag) {
2380 /* copy the message to the shared RAM and signal SEP */
2381 memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);
2382
2383 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2384 }
2385 mutex_unlock(&sep_mutex);
2386}
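
The cast from the work_struct pointer to the flow context above is valid only because flow_wq is the first member of struct sep_flow_context_t (the structure comment in sep_driver_api.h states this requirement). The conventional kernel idiom, sketched below, is container_of(), which does not depend on member ordering:

	flow_data_ptr = container_of(work, struct sep_flow_context_t, flow_wq);
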
2387/*
2388 interrupt handler function
2389*/
2390static irqreturn_t sep_inthandler(int irq, void *dev_id)
2391{
2392 irqreturn_t int_error;
2393 unsigned long reg_val;
2394 unsigned long flow_id;
2395 struct sep_flow_context_t *flow_context_ptr;
2396 struct sep_device *sep = dev_id;
2397
2398 int_error = IRQ_HANDLED;
2399
2400 /* read the IRR register to check if this is SEP interrupt */
2401 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2402 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
2403
2404 /* check if this is the flow interrupt */
2405 if (0 /*reg_val & (0x1 << 11) */ ) {
2406		/* read GPR0 to find out which flow is done */
2407 flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2408
2409		/* find the context of the flow */
2410 flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
2411 if (flow_context_ptr == NULL)
2412 goto end_function_with_error;
2413
2414 /* queue the work */
2415 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
2416 queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
2417
2418 } else {
2419 /* check if this is reply interrupt from SEP */
2420 if (reg_val & (0x1 << 13)) {
2421 /* update the counter of reply messages */
2422 sep->reply_ct++;
2423 /* wake up the waiting process */
2424 wake_up(&sep_event);
2425 } else {
2426 int_error = IRQ_NONE;
2427 goto end_function;
2428 }
2429 }
2430end_function_with_error:
2431 /* clear the interrupt */
2432 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
2433end_function:
2434 return int_error;
2435}
2436
2437#endif
2438
2439
2440
2441#if 0
2442
2443static void sep_wait_busy(struct sep_device *sep)
2444{
2445 u32 reg;
2446
2447 do {
2448 reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
2449 } while (reg);
2450}
2451
2452/*
2453 PATCH for configuring the DMA to single burst instead of multi-burst
2454*/
2455static void sep_configure_dma_burst(struct sep_device *sep)
2456{
2457#define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2458
2459 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2460
2461 /* request access to registers from SEP */
2462 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2463
2464 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
2465
2466 sep_wait_busy(sep);
2467
2468 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2469
2470 /* set the DMA burst register to single burst */
2471 sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2472
2473 /* release the sep busy */
2474 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2475 sep_wait_busy(sep);
2476
2477 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2478
2479}
2480
2481#endif
2482
2483/*
2484 Function that is activated on the successful probe of the SEP device
2485*/
2486static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2487{
2488 int error = 0;
2489 struct sep_device *sep;
2490 int counter;
2491 int size; /* size of memory for allocation */
2492
2493 edbg("Sep pci probe starting\n");
2494 if (sep_dev != NULL) {
2495 dev_warn(&pdev->dev, "only one SEP supported.\n");
2496 return -EBUSY;
2497 }
2498
2499 /* enable the device */
2500 error = pci_enable_device(pdev);
2501 if (error) {
2502 edbg("error enabling pci device\n");
2503 goto end_function;
2504 }
2505
2506 /* set the pci dev pointer */
2507 sep_dev = &sep_instance;
2508 sep = &sep_instance;
2509
2510 edbg("sep->shared_addr = %p\n", sep->shared_addr);
2511 /* transaction counter that coordinates the transactions between SEP
2512 and HOST */
2513 sep->send_ct = 0;
2514 /* counter for the messages from sep */
2515 sep->reply_ct = 0;
2516 /* counter for the number of bytes allocated in the pool
2517 for the current transaction */
2518 sep->data_pool_bytes_allocated = 0;
2519
2520 /* calculate the total size for allocation */
2521 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2522 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2523
2524 /* allocate the shared area */
2525 if (sep_map_and_alloc_shared_area(sep, size)) {
2526 error = -ENOMEM;
2527 /* allocation failed */
2528 goto end_function_error;
2529 }
2530 /* now set the memory regions */
2531#if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2532 /* Note: this test section will need moving before it could ever
2533 work as the registers are not yet mapped ! */
2534 /* send the new SHARED MESSAGE AREA to the SEP */
2535 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2536
2537 /* poll for SEP response */
2538 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2539 while (retval != 0xffffffff && retval != sep->shared_bus)
2540 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2541
2542 /* check the return value (register) */
2543 if (retval != sep->shared_bus) {
2544 error = -ENOMEM;
2545 goto end_function_deallocate_sep_shared_area;
2546 }
2547#endif
2548 /* init the flow contextes */
2549 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2550 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2551
2552 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2553 if (sep->flow_wq == NULL) {
2554 error = -ENOMEM;
2555 edbg("sep_driver:flow queue creation failed\n");
2556 goto end_function_deallocate_sep_shared_area;
2557 }
2558 edbg("SEP Driver: create flow workqueue \n");
2559 sep->pdev = pci_dev_get(pdev);
2560
2561 sep->reg_addr = pci_ioremap_bar(pdev, 0);
2562 if (!sep->reg_addr) {
2563 edbg("sep: ioremap of registers failed.\n");
2564 goto end_function_deallocate_sep_shared_area;
2565 }
2566 edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
2567
2568 /* load the rom code */
2569 sep_load_rom_code(sep);
2570
2571 /* set up system base address and shared memory location */
2572 sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
2573 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2574 &sep->rar_bus, GFP_KERNEL);
2575
2576 if (!sep->rar_addr) {
2577 edbg("SEP Driver:can't allocate rar\n");
2578 goto end_function_uniomap;
2579 }
2580
2581
2582 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
2583 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2584
2585#if !SEP_DRIVER_POLLING_MODE
2586
2587 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2588
2589 /* clear ICR register */
2590 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2591
2592 /* set the IMR register - open only GPR 2 */
2593 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2594
2595 edbg("SEP Driver: about to call request_irq\n");
2596 /* get the interrupt line */
2597 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2598 if (error)
2599 goto end_function_free_res;
2600 return 0;
2601 edbg("SEP Driver: about to write IMR REG_ADDR");
2602
2603 /* set the IMR register - open only GPR 2 */
2604 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2605
2606end_function_free_res:
2607 dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2608 sep->rar_addr, sep->rar_bus);
2609#endif /* SEP_DRIVER_POLLING_MODE */
2610end_function_uniomap:
2611 iounmap(sep->reg_addr);
2612end_function_deallocate_sep_shared_area:
2613 /* de-allocate shared area */
2614 sep_unmap_and_free_shared_area(sep, size);
2615end_function_error:
2616 sep_dev = NULL;
2617end_function:
2618 return error;
2619}
2620
2621static const struct pci_device_id sep_pci_id_tbl[] = {
2622 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
2623 {0}
2624};
2625
2626MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2627
2628/* field for registering driver to PCI device */
2629static struct pci_driver sep_pci_driver = {
2630 .name = "sep_sec_driver",
2631 .id_table = sep_pci_id_tbl,
2632 .probe = sep_probe
2633 /* FIXME: remove handler */
2634};
2635
2636/* major and minor device numbers */
2637static dev_t sep_devno;
2638
2639/* the files operations structure of the driver */
2640static struct file_operations sep_file_operations = {
2641 .owner = THIS_MODULE,
2642 .unlocked_ioctl = sep_ioctl,
2643 .poll = sep_poll,
2644 .open = sep_open,
2645 .release = sep_release,
2646 .mmap = sep_mmap,
2647};
2648
2649
2650/* cdev struct of the driver */
2651static struct cdev sep_cdev;
2652
2653/*
2654 this function registers the driver to the file system
2655*/
2656static int sep_register_driver_to_fs(void)
2657{
2658 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2659 if (ret_val) {
2660 edbg("sep: major number allocation failed, retval is %d\n",
2661 ret_val);
2662 return ret_val;
2663 }
2664 /* init cdev */
2665 cdev_init(&sep_cdev, &sep_file_operations);
2666 sep_cdev.owner = THIS_MODULE;
2667
2668 /* register the driver with the kernel */
2669 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2670 if (ret_val) {
2671 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2672 /* unregister dev numbers */
2673 unregister_chrdev_region(sep_devno, 1);
2674 }
2675 return ret_val;
2676}
2677
2678
2679/*--------------------------------------------------------------
2680 init function
2681----------------------------------------------------------------*/
2682static int __init sep_init(void)
2683{
2684 int ret_val = 0;
2685 dbg("SEP Driver:-------->Init start\n");
2686 /* FIXME: Probe can occur before we are ready to survive a probe */
2687 ret_val = pci_register_driver(&sep_pci_driver);
2688 if (ret_val) {
2689 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2690 goto end_function_unregister_from_fs;
2691 }
2692 /* register driver to fs */
2693 ret_val = sep_register_driver_to_fs();
2694 if (ret_val)
2695 goto end_function_unregister_pci;
2696 goto end_function;
2697end_function_unregister_pci:
2698 pci_unregister_driver(&sep_pci_driver);
2699end_function_unregister_from_fs:
2700 /* unregister from fs */
2701 cdev_del(&sep_cdev);
2702 /* unregister dev numbers */
2703 unregister_chrdev_region(sep_devno, 1);
2704end_function:
2705 dbg("SEP Driver:<-------- Init end\n");
2706 return ret_val;
2707}
2708
2709
2710/*-------------------------------------------------------------
2711 exit function
2712--------------------------------------------------------------*/
2713static void __exit sep_exit(void)
2714{
2715 int size;
2716
2717 dbg("SEP Driver:--------> Exit start\n");
2718
2719 /* unregister from fs */
2720 cdev_del(&sep_cdev);
2721 /* unregister dev numbers */
2722 unregister_chrdev_region(sep_devno, 1);
2723 /* calculate the total size for de-allocation */
2724 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2725 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2726 /* FIXME: We need to do this in the unload for the device */
2727 /* free shared area */
2728 if (sep_dev) {
2729 sep_unmap_and_free_shared_area(sep_dev, size);
2730 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2731 iounmap((void *) sep_dev->reg_addr);
2732 edbg("SEP Driver: iounmap \n");
2733 }
2734 edbg("SEP Driver: release_mem_region \n");
2735 dbg("SEP Driver:<-------- Exit end\n");
2736}
2737
2738
2739module_init(sep_init);
2740module_exit(sep_exit);
2741
2742MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
deleted file mode 100644
index 7ef16da7c4ef..000000000000
--- a/drivers/staging/sep/sep_driver_api.h
+++ /dev/null
@@ -1,425 +0,0 @@
1/*
2 *
3 * sep_driver_api.h - Security Processor Driver api definitions
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32#ifndef __SEP_DRIVER_API_H__
33#define __SEP_DRIVER_API_H__
34
35
36
37/*----------------------------------------------------------------
38 IOCTL command defines
39 -----------------------------------------------------------------*/
40
41/* magic number 1 of the sep IOCTL command */
42#define SEP_IOC_MAGIC_NUMBER 's'
43
44/* sends interrupt to sep that message is ready */
45#define SEP_IOCSENDSEPCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 0)
46
47/* sends interrupt to sep that message is ready */
48#define SEP_IOCSENDSEPRPLYCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 1)
49
50/* allocate memory in data pool */
51#define SEP_IOCALLOCDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 2)
52
53/* write to pre-allocated memory in data pool */
54#define SEP_IOCWRITEDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 3)
55
56/* read from pre-allocated memory in data pool */
57#define SEP_IOCREADDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 4)
58
59/* create sym dma lli tables */
60#define SEP_IOCCREATESYMDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 5)
61
62/* create flow dma lli tables */
63#define SEP_IOCCREATEFLOWDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 6)
64
65/* free dynamic data allocated during table creation */
66#define SEP_IOCFREEDMATABLEDATA _IO(SEP_IOC_MAGIC_NUMBER , 7)
67
68/* get the static pool area addresses (physical and virtual) */
69#define SEP_IOCGETSTATICPOOLADDR _IO(SEP_IOC_MAGIC_NUMBER , 8)
70
71/* set flow id command */
72#define SEP_IOCSETFLOWID _IO(SEP_IOC_MAGIC_NUMBER , 9)
73
74/* add tables to the dynamic flow */
75#define SEP_IOCADDFLOWTABLE _IO(SEP_IOC_MAGIC_NUMBER , 10)
76
77/* add flow add tables message */
78#define SEP_IOCADDFLOWMESSAGE _IO(SEP_IOC_MAGIC_NUMBER , 11)
79
80/* start sep command */
81#define SEP_IOCSEPSTART _IO(SEP_IOC_MAGIC_NUMBER , 12)
82
83/* init sep command */
84#define SEP_IOCSEPINIT _IO(SEP_IOC_MAGIC_NUMBER , 13)
85
86/* end transaction command */
87#define SEP_IOCENDTRANSACTION _IO(SEP_IOC_MAGIC_NUMBER , 15)
88
89/* reallocate cache and resident */
90#define SEP_IOCREALLOCCACHERES _IO(SEP_IOC_MAGIC_NUMBER , 16)
91
92/* get the offset of the address starting from the beginning of the map area */
93#define SEP_IOCGETMAPPEDADDROFFSET _IO(SEP_IOC_MAGIC_NUMBER , 17)
94
95/* get time address and value */
96#define SEP_IOCGETIME _IO(SEP_IOC_MAGIC_NUMBER , 19)
97
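
All of the commands above are encoded with _IO(), i.e. with no argument size in the command number; the handlers copy their argument structures explicitly with copy_from_user()/copy_to_user(). A hedged sketch of the more common size-checked encoding for two of the read-back commands (alternative names, not part of the driver):

#define SEP_IOCGETSTATICPOOLADDR_ALT \
	_IOR(SEP_IOC_MAGIC_NUMBER, 8, struct sep_driver_static_pool_addr_t)
#define SEP_IOCGETIME_ALT \
	_IOR(SEP_IOC_MAGIC_NUMBER, 19, struct sep_driver_get_time_t)
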
98/*-------------------------------------------
99 TYPEDEFS
100----------------------------------------------*/
101
102/*
103 init command struct
104*/
105struct sep_driver_init_t {
106	/* address of the init message in host memory */
107 unsigned long message_addr;
108
109	/* size of the init message in words */
110 unsigned long message_size_in_words;
111
112};
113
114
115/*
116 realloc cache resident command
117*/
118struct sep_driver_realloc_cache_resident_t {
119 /* new cache address */
120 u64 new_cache_addr;
121 /* new resident address */
122 u64 new_resident_addr;
123	/* new shared area address */
124 u64 new_shared_area_addr;
125 /* new base address */
126 u64 new_base_addr;
127};
128
129struct sep_driver_alloc_t {
130 /* virtual address of allocated space */
131 unsigned long offset;
132
133 /* physical address of allocated space */
134 unsigned long phys_address;
135
136 /* number of bytes to allocate */
137 unsigned long num_bytes;
138};
139
140/*
141 */
142struct sep_driver_write_t {
143 /* application space address */
144 unsigned long app_address;
145
146 /* address of the data pool */
147 unsigned long datapool_address;
148
149 /* number of bytes to write */
150 unsigned long num_bytes;
151};
152
153/*
154 */
155struct sep_driver_read_t {
156 /* application space address */
157 unsigned long app_address;
158
159 /* address of the data pool */
160 unsigned long datapool_address;
161
162 /* number of bytes to read */
163 unsigned long num_bytes;
164};
165
166/*
167*/
168struct sep_driver_build_sync_table_t {
169 /* address value of the data in */
170 unsigned long app_in_address;
171
172 /* size of data in */
173 unsigned long data_in_size;
174
175 /* address of the data out */
176 unsigned long app_out_address;
177
178 /* the size of the block of the operation - if needed,
179 every table will be modulo this parameter */
180 unsigned long block_size;
181
182 /* the physical address of the first input DMA table */
183 unsigned long in_table_address;
184
185 /* number of entries in the first input DMA table */
186 unsigned long in_table_num_entries;
187
188 /* the physical address of the first output DMA table */
189 unsigned long out_table_address;
190
191 /* number of entries in the first output DMA table */
192 unsigned long out_table_num_entries;
193
194 /* data in the first input table */
195 unsigned long table_data_size;
196
197	/* flag: the virtual addresses are kernel addresses */
198 bool isKernelVirtualAddress;
199
200};
201
202/*
203*/
204struct sep_driver_build_flow_table_t {
205 /* flow type */
206 unsigned long flow_type;
207
208 /* flag for input output */
209 unsigned long input_output_flag;
210
211 /* address value of the data in */
212 unsigned long virt_buff_data_addr;
213
214	/* number of virtual buffers */
215 unsigned long num_virtual_buffers;
216
217 /* the physical address of the first input DMA table */
218 unsigned long first_table_addr;
219
220 /* number of entries in the first input DMA table */
221 unsigned long first_table_num_entries;
222
223 /* data in the first input table */
224 unsigned long first_table_data_size;
225
226	/* flag: the virtual addresses are kernel addresses */
227 bool isKernelVirtualAddress;
228};
229
230
231struct sep_driver_add_flow_table_t {
232 /* flow id */
233 unsigned long flow_id;
234
235 /* flag for input output */
236 unsigned long inputOutputFlag;
237
238 /* address value of the data in */
239 unsigned long virt_buff_data_addr;
240
241	/* number of virtual buffers */
242 unsigned long num_virtual_buffers;
243
244 /* address of the first table */
245 unsigned long first_table_addr;
246
247 /* number of entries in the first table */
248 unsigned long first_table_num_entries;
249
250 /* data size of the first table */
251 unsigned long first_table_data_size;
252
253	/* flag: the virtual addresses are kernel addresses */
254 bool isKernelVirtualAddress;
255
256};
257
258/*
259 command struct for set flow id
260*/
261struct sep_driver_set_flow_id_t {
262 /* flow id to set */
263 unsigned long flow_id;
264};
265
266
267/* command struct for add tables message */
268struct sep_driver_add_message_t {
269 /* flow id to set */
270 unsigned long flow_id;
271
272 /* message size in bytes */
273 unsigned long message_size_in_bytes;
274
275 /* address of the message */
276 unsigned long message_address;
277};
278
279/* command struct for static pool addresses */
280struct sep_driver_static_pool_addr_t {
281 /* physical address of the static pool */
282 unsigned long physical_static_address;
283
284 /* virtual address of the static pool */
285 unsigned long virtual_static_address;
286};
287
288/* command struct for getting the offset of the physical address from
289 the start of the mapped area */
290struct sep_driver_get_mapped_offset_t {
291	/* physical address to compute the offset for */
292 unsigned long physical_address;
293
294	/* offset of the physical address from the start of the mapped area */
295 unsigned long offset;
296};
297
298/* command struct for getting time value and address */
299struct sep_driver_get_time_t {
300 /* physical address of stored time */
301 unsigned long time_physical_address;
302
303 /* value of the stored time */
304 unsigned long time_value;
305};
306
307
308/*
309 structure that represent one entry in the DMA LLI table
310*/
311struct sep_lli_entry_t {
312 /* physical address */
313 unsigned long physical_address;
314
315 /* block size */
316 unsigned long block_size;
317};
318
319/*
320 structure that represents data needed for lli table construction
321*/
322struct sep_lli_prepare_table_data_t {
323 /* pointer to the memory where the first lli entry to be built */
324 struct sep_lli_entry_t *lli_entry_ptr;
325
326 /* pointer to the array of lli entries from which the table is to be built */
327 struct sep_lli_entry_t *lli_array_ptr;
328
329 /* number of elements in lli array */
330 int lli_array_size;
331
332 /* number of entries in the created table */
333 int num_table_entries;
334
335 /* number of array entries processed during table creation */
336 int num_array_entries_processed;
337
338	/* the total data size in the created table */
339 int lli_table_total_data_size;
340};
341
342/*
343 structure that represents one table - it is not used in the code, just
344 to show what a table looks like
345*/
346struct sep_lli_table_t {
347 /* number of pages mapped in this tables. If 0 - means that the table
348 is not defined (used as a valid flag) */
349 unsigned long num_pages;
350 /*
351 pointer to array of page pointers that represent the mapping of the
352 virtual buffer defined by the table to the physical memory. If this
353 pointer is NULL, it means that the table is not defined
354 (used as a valid flag)
355 */
356 struct page **table_page_array_ptr;
357
358 /* maximum flow entries in table */
359 struct sep_lli_entry_t lli_entries[SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE];
360};
361
362
363/*
364 structure for keeping the mapping of the virtual buffer into physical pages
365*/
366struct sep_flow_buffer_data {
367 /* pointer to the array of page structs pointers to the pages of the
368 virtual buffer */
369 struct page **page_array_ptr;
370
371 /* number of pages taken by the virtual buffer */
372 unsigned long num_pages;
373
374 /* this flag signals if this page_array is the last one among many that were
375 sent in one setting to SEP */
376 unsigned long last_page_array_flag;
377};
378
379/*
380 struct that keeps all the data for one flow
381*/
382struct sep_flow_context_t {
383 /*
384 work struct for handling the flow done interrupt in the workqueue
385 this member must be first in the structure, since it is used
386 for casting back to the containing flow context
387 */
388 struct work_struct flow_wq;
389
390 /* flow id */
391 unsigned long flow_id;
392
393 /* additional input tables exists */
394 unsigned long input_tables_flag;
395
396 /* additional output tables exists */
397 unsigned long output_tables_flag;
398
399	/* data of the first input table */
400 struct sep_lli_entry_t first_input_table;
401
402 /* data of the first output table */
403 struct sep_lli_entry_t first_output_table;
404
405 /* last input table data */
406 struct sep_lli_entry_t last_input_table;
407
408 /* last output table data */
409 struct sep_lli_entry_t last_output_table;
410
411	/* input tables in process (in sep) */
412 struct sep_lli_entry_t input_tables_in_process;
413
414 /* output table in process (in sep) */
415 struct sep_lli_entry_t output_tables_in_process;
416
417 /* size of messages in bytes */
418 unsigned long message_size_in_bytes;
419
420 /* message */
421 unsigned char message[SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES];
422};
423
424
425#endif
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
deleted file mode 100644
index 6008fe5eca09..000000000000
--- a/drivers/staging/sep/sep_driver_config.h
+++ /dev/null
@@ -1,225 +0,0 @@
1/*
2 *
3 * sep_driver_config.h - Security Processor Driver configuration
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32#ifndef __SEP_DRIVER_CONFIG_H__
33#define __SEP_DRIVER_CONFIG_H__
34
35
36/*--------------------------------------
37 DRIVER CONFIGURATION FLAGS
38 -------------------------------------*/
39
40/* if this flag is on, the driver runs in polling mode rather than
41 interrupt mode */
42#define SEP_DRIVER_POLLING_MODE 1
43
44/* flag which defines if the shared area address should be
45 reconfigured (sent to SEP anew) during init of the driver */
46#define SEP_DRIVER_RECONFIG_MESSAGE_AREA 0
47
48/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
49#define SEP_DRIVER_ARM_DEBUG_MODE 0
50
51/*-------------------------------------------
52 INTERNAL DATA CONFIGURATION
53 -------------------------------------------*/
54
55/* flag for the input array */
56#define SEP_DRIVER_IN_FLAG 0
57
58/* flag for output array */
59#define SEP_DRIVER_OUT_FLAG 1
60
61/* maximum number of entries in one LLI tables */
62#define SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP 8
63
64
65/*--------------------------------------------------------
66 SHARED AREA memory total size is 36K
67 it is divided as follows:
68
69 SHARED_MESSAGE_AREA 8K }
70 }
71 STATIC_POOL_AREA 4K } MAPPED AREA ( 24 K)
72 }
73 DATA_POOL_AREA 12K }
74
75 SYNCHRONIC_DMA_TABLES_AREA 5K
76
77 FLOW_DMA_TABLES_AREA 4K
78
79 SYSTEM_MEMORY_AREA 3k
80
81 SYSTEM_MEMORY total size is 3k
82 it is divided as follows:
83
84 TIME_MEMORY_AREA 8B
85-----------------------------------------------------------*/
86
87
88
89/*
90 the maximum length of the message - the rest of the message shared
91 area will be dedicated to the dma lli tables
92*/
93#define SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES (8 * 1024)
94
95/* the size of the message shared area in bytes */
96#define SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES (8 * 1024)
97
98/* the size of the static pool area in bytes */
99#define SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES (4 * 1024)
100
101/* the size of the data pool shared area in bytes */
102#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (12 * 1024)
103
104/* the size of the synchronic dma tables area in bytes */
105#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 5)
106
107
108/* the size of the flow dma tables area in bytes */
109#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4)
110
111/* system data (time, caller id etc') pool */
112#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES 100
113
114
115/* area size that is mapped - we map the MESSAGE AREA, STATIC POOL and
116 DATA POOL areas. The area size must be a multiple of 4k */
117#define SEP_DRIVER_MMMAP_AREA_SIZE (1024 * 24)
118
119
120/*-----------------------------------------------
121 offsets of the areas starting from the shared area start address
122*/
123
124/* message area offset */
125#define SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES 0
126
127/* static pool area offset */
128#define SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES \
129 (SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
130
131/* data pool area offset */
132#define SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES \
133 (SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES + \
134 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES)
135
136/* synchronic dma tables area offset */
137#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES \
138 (SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + \
139 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)
140
141/* sep driver flow dma tables area offset */
142#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES \
143 (SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
144 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES)
145
146/* system memory offset in bytes */
147#define SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES \
148 (SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
149 SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES)
150
151/* offset of the time area */
152#define SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES \
153 (SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES)
154
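
The offset macros above chain each area directly after the previous one, so the size macros fully determine the layout. A small host-side sketch (assuming this header is included on its own) that prints the resulting offsets; note that the layout comment earlier quotes a 3k system area while SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES reserves only 100 bytes:

#include <stdio.h>
/* plus this header (sep_driver_config.h) */

int main(void)
{
	printf("message    %6d\n", SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES);               /* 0     */
	printf("static     %6d\n", SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES);                /* 8192  */
	printf("data pool  %6d\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);             /* 12288 */
	printf("sync DMA   %6d\n", SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES); /* 24576 */
	printf("flow DMA   %6d\n", SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES);       /* 29696 */
	printf("system     %6d\n", SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES);         /* 33792 */
	return 0;
}
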
155
156
157/* start physical address of the SEP registers memory in HOST */
158#define SEP_IO_MEM_REGION_START_ADDRESS 0x80000000
159
160/* size of the SEP registers memory region in HOST (for now 100 registers) */
161#define SEP_IO_MEM_REGION_SIZE (2 * 0x100000)
162
163/* define the IRQ number for the SEP interrupt */
164#define SEP_DIRVER_IRQ_NUM 1
165
166/* maximum number of add buffers */
167#define SEP_MAX_NUM_ADD_BUFFERS 100
168
169/* number of flows */
170#define SEP_DRIVER_NUM_FLOWS 4
171
172/* maximum number of entries in flow table */
173#define SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE 25
174
175/* offset of the num entries in the block length entry of the LLI */
176#define SEP_NUM_ENTRIES_OFFSET_IN_BITS 24
177
178/* offset of the interrupt flag in the block length entry of the LLI */
179#define SEP_INT_FLAG_OFFSET_IN_BITS 31
180
181/* mask for extracting data size from LLI */
182#define SEP_TABLE_DATA_SIZE_MASK 0xFFFFFF
183
184/* mask for entries after being shifted left */
185#define SEP_NUM_ENTRIES_MASK 0x7F
186
187/* default flow id */
188#define SEP_FREE_FLOW_ID 0xFFFFFFFF
189
190/* temp flow id used during creation of a new flow until receiving
191 real flow id from sep */
192#define SEP_TEMP_FLOW_ID (SEP_DRIVER_NUM_FLOWS + 1)
193
194/* maximum add buffers message length in bytes */
195#define SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES (7 * 4)
196
197/* maximum number of concurrent virtual buffers */
198#define SEP_MAX_VIRT_BUFFERS_CONCURRENT 100
199
200/* the token that defines the start of time address */
201#define SEP_TIME_VAL_TOKEN 0x12345678
202
203/* DEBUG LEVEL MASKS */
204#define SEP_DEBUG_LEVEL_BASIC 0x1
205
206#define SEP_DEBUG_LEVEL_EXTENDED 0x4
207
208
209/* Debug helpers */
210
211#define dbg(fmt, args...) \
212do {\
213 if (debug & SEP_DEBUG_LEVEL_BASIC) \
214 printk(KERN_DEBUG fmt, ##args); \
215} while (0)
216
217#define edbg(fmt, args...) \
218do { \
219 if (debug & SEP_DEBUG_LEVEL_EXTENDED) \
220 printk(KERN_DEBUG fmt, ##args); \
221} while (0)
222
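
The dbg()/edbg() helpers wrap their bodies in do { ... } while (0) so that each use expands to a single statement; keeping a semicolon inside the macro would break if/else constructs such as the sketch below, which is why the definitions above end at while (0) and let the caller supply the terminating semicolon:

	if (debug & SEP_DEBUG_LEVEL_BASIC)
		dbg("SEP Driver: basic debug enabled\n");
	else
		edbg("SEP Driver: extended debug only\n");
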
223
224
225#endif
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
deleted file mode 100644
index ea6abd8a14b4..000000000000
--- a/drivers/staging/sep/sep_driver_hw_defs.h
+++ /dev/null
@@ -1,232 +0,0 @@
1/*
2 *
3 * sep_driver_hw_defs.h - Security Processor Driver hardware definitions
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32#ifndef SEP_DRIVER_HW_DEFS__H
33#define SEP_DRIVER_HW_DEFS__H
34
35/*--------------------------------------------------------------------------*/
36/* Abstract: HW Registers Defines. */
37/* */
38/* Note: This file was automatically created !!! */
39/* DO NOT EDIT THIS FILE !!! */
40/*--------------------------------------------------------------------------*/
41
42
43/* cf registers */
44#define HW_R0B_ADDR_0_REG_ADDR 0x0000UL
45#define HW_R0B_ADDR_1_REG_ADDR 0x0004UL
46#define HW_R0B_ADDR_2_REG_ADDR 0x0008UL
47#define HW_R0B_ADDR_3_REG_ADDR 0x000cUL
48#define HW_R0B_ADDR_4_REG_ADDR 0x0010UL
49#define HW_R0B_ADDR_5_REG_ADDR 0x0014UL
50#define HW_R0B_ADDR_6_REG_ADDR 0x0018UL
51#define HW_R0B_ADDR_7_REG_ADDR 0x001cUL
52#define HW_R0B_ADDR_8_REG_ADDR 0x0020UL
53#define HW_R2B_ADDR_0_REG_ADDR 0x0080UL
54#define HW_R2B_ADDR_1_REG_ADDR 0x0084UL
55#define HW_R2B_ADDR_2_REG_ADDR 0x0088UL
56#define HW_R2B_ADDR_3_REG_ADDR 0x008cUL
57#define HW_R2B_ADDR_4_REG_ADDR 0x0090UL
58#define HW_R2B_ADDR_5_REG_ADDR 0x0094UL
59#define HW_R2B_ADDR_6_REG_ADDR 0x0098UL
60#define HW_R2B_ADDR_7_REG_ADDR 0x009cUL
61#define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL
62#define HW_R3B_REG_ADDR 0x00C0UL
63#define HW_R4B_REG_ADDR 0x0100UL
64#define HW_CSA_ADDR_0_REG_ADDR 0x0140UL
65#define HW_CSA_ADDR_1_REG_ADDR 0x0144UL
66#define HW_CSA_ADDR_2_REG_ADDR 0x0148UL
67#define HW_CSA_ADDR_3_REG_ADDR 0x014cUL
68#define HW_CSA_ADDR_4_REG_ADDR 0x0150UL
69#define HW_CSA_ADDR_5_REG_ADDR 0x0154UL
70#define HW_CSA_ADDR_6_REG_ADDR 0x0158UL
71#define HW_CSA_ADDR_7_REG_ADDR 0x015cUL
72#define HW_CSA_ADDR_8_REG_ADDR 0x0160UL
73#define HW_CSA_REG_ADDR 0x0140UL
74#define HW_SINB_REG_ADDR 0x0180UL
75#define HW_SOUTB_REG_ADDR 0x0184UL
76#define HW_PKI_CONTROL_REG_ADDR 0x01C0UL
77#define HW_PKI_STATUS_REG_ADDR 0x01C4UL
78#define HW_PKI_BUSY_REG_ADDR 0x01C8UL
79#define HW_PKI_A_1025_REG_ADDR 0x01CCUL
80#define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL
81#define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL
82#define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL
83#define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL
84#define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL
85#define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL
86#define HW_PKI_CLR_REG_ADDR 0x01E8UL
87#define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL
88#define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL
89#define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL
90#define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL
91#define HW_DES_KEY_0_REG_ADDR 0x0208UL
92#define HW_DES_KEY_1_REG_ADDR 0x020CUL
93#define HW_DES_KEY_2_REG_ADDR 0x0210UL
94#define HW_DES_KEY_3_REG_ADDR 0x0214UL
95#define HW_DES_KEY_4_REG_ADDR 0x0218UL
96#define HW_DES_KEY_5_REG_ADDR 0x021CUL
97#define HW_DES_CONTROL_0_REG_ADDR 0x0220UL
98#define HW_DES_CONTROL_1_REG_ADDR 0x0224UL
99#define HW_DES_IV_0_REG_ADDR 0x0228UL
100#define HW_DES_IV_1_REG_ADDR 0x022CUL
101#define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL
102#define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL
103#define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL
104#define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL
105#define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL
106#define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL
107#define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL
108#define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL
109#define HW_AES_KEY_0_REG_ADDR 0x0400UL
110#define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL
111#define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL
112#define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL
113#define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL
114#define HW_AES_IV_0_REG_ADDR 0x0440UL
115#define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL
116#define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL
117#define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL
118#define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL
119#define HW_AES_CTR1_REG_ADDR 0x0460UL
120#define HW_AES_SK_REG_ADDR 0x0478UL
121#define HW_AES_MAC_OK_REG_ADDR 0x0480UL
122#define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL
123#define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL
124#define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL
125#define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL
126#define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL
127#define HW_AES_CONTROL_REG_ADDR 0x04C0UL
128#define HW_HASH_H0_REG_ADDR 0x0640UL
129#define HW_HASH_H1_REG_ADDR 0x0644UL
130#define HW_HASH_H2_REG_ADDR 0x0648UL
131#define HW_HASH_H3_REG_ADDR 0x064CUL
132#define HW_HASH_H4_REG_ADDR 0x0650UL
133#define HW_HASH_H5_REG_ADDR 0x0654UL
134#define HW_HASH_H6_REG_ADDR 0x0658UL
135#define HW_HASH_H7_REG_ADDR 0x065CUL
136#define HW_HASH_H8_REG_ADDR 0x0660UL
137#define HW_HASH_H9_REG_ADDR 0x0664UL
138#define HW_HASH_H10_REG_ADDR 0x0668UL
139#define HW_HASH_H11_REG_ADDR 0x066CUL
140#define HW_HASH_H12_REG_ADDR 0x0670UL
141#define HW_HASH_H13_REG_ADDR 0x0674UL
142#define HW_HASH_H14_REG_ADDR 0x0678UL
143#define HW_HASH_H15_REG_ADDR 0x067CUL
144#define HW_HASH_CONTROL_REG_ADDR 0x07C0UL
145#define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL
146#define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL
147#define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL
148#define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL
149#define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL
150#define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL
151#define HW_HASH_PARAM_REG_ADDR 0x07DCUL
152#define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL
153#define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL
154#define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL
155#define HW_HASH_DATA_REG_ADDR 0x07ECUL
156#define HW_DRNG_CONTROL_REG_ADDR 0x0800UL
157#define HW_DRNG_VALID_REG_ADDR 0x0804UL
158#define HW_DRNG_DATA_REG_ADDR 0x0808UL
159#define HW_RND_SRC_EN_REG_ADDR 0x080CUL
160#define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL
161#define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL
162#define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL
163#define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL
164#define HW_CLK_STATUS_REG_ADDR 0x0824UL
165#define HW_CLK_ENABLE_REG_ADDR 0x0828UL
166#define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL
167#define HW_RND_SRC_CTL_REG_ADDR 0x0858UL
168#define HW_CRYPTO_CTL_REG_ADDR 0x0900UL
169#define HW_CRYPTO_STATUS_REG_ADDR 0x090CUL
170#define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL
171#define HW_AES_BUSY_REG_ADDR 0x0914UL
172#define HW_DES_BUSY_REG_ADDR 0x0918UL
173#define HW_HASH_BUSY_REG_ADDR 0x091CUL
174#define HW_CONTENT_REG_ADDR 0x0924UL
175#define HW_VERSION_REG_ADDR 0x0928UL
176#define HW_CONTEXT_ID_REG_ADDR 0x0930UL
177#define HW_DIN_BUFFER_REG_ADDR 0x0C00UL
178#define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL
179#define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL
180#define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL
181#define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL
182#define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL
183#define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL
184#define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL
185#define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL
186#define HW_OLD_DATA_REG_ADDR 0x0C48UL
187#define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL
188#define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL
189#define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL
190#define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL
191#define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL
192#define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL
193#define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL
194#define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL
195#define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL
196#define HW_READ_ALIGN_REG_ADDR 0x0D3CUL
197#define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL
198#define HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL
199#define HW_AHB_SINGLE_REG_ADDR 0x0E00UL
200#define HW_SRAM_DATA_REG_ADDR 0x0F00UL
201#define HW_SRAM_ADDR_REG_ADDR 0x0F04UL
202#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
203#define HW_HOST_IRR_REG_ADDR 0x0A00UL
204#define HW_HOST_IMR_REG_ADDR 0x0A04UL
205#define HW_HOST_ICR_REG_ADDR 0x0A08UL
206#define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL
207#define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL
208#define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL
209#define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL
210#define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL
211#define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL
212#define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL
213#define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL
214#define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL
215#define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL
216#define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL
217#define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL
218#define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL
219#define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL
220#define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL
221#define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL
222#define HW_HOST_SEP_HOST_GPR3_REG_ADDR 0x0B0CUL
223#define HW_HOST_HOST_SEP_GPR0_REG_ADDR 0x0B80UL
224#define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL
225#define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL
226#define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL
227#define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL
228#define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL
229#define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL
230#define HW_CC_SRAM_BASE_ADDRESS 0x5800UL
231
232#endif /* ifndef HW_DEFS */
diff --git a/drivers/staging/spectra/Kconfig b/drivers/staging/spectra/Kconfig
index 5e2ffefb60af..d231ae27299d 100644
--- a/drivers/staging/spectra/Kconfig
+++ b/drivers/staging/spectra/Kconfig
@@ -2,6 +2,7 @@
2menuconfig SPECTRA 2menuconfig SPECTRA
3 tristate "Denali Spectra Flash Translation Layer" 3 tristate "Denali Spectra Flash Translation Layer"
4 depends on BLOCK 4 depends on BLOCK
5 depends on X86_MRST
5 default n 6 default n
6 ---help--- 7 ---help---
7 Enable the FTL pseudo-filesystem used with the NAND Flash 8 Enable the FTL pseudo-filesystem used with the NAND Flash
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
index d0c5c97eda3e..fa21a0fd8e84 100644
--- a/drivers/staging/spectra/ffsport.c
+++ b/drivers/staging/spectra/ffsport.c
@@ -27,6 +27,8 @@
27#include <linux/kthread.h> 27#include <linux/kthread.h>
28#include <linux/log2.h> 28#include <linux/log2.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/smp_lock.h>
31#include <linux/slab.h>
30 32
31/**** Helper functions used for Div, Remainder operation on u64 ****/ 33/**** Helper functions used for Div, Remainder operation on u64 ****/
32 34
@@ -113,7 +115,6 @@ u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
113 115
114#define GLOB_SBD_NAME "nd" 116#define GLOB_SBD_NAME "nd"
115#define GLOB_SBD_IRQ_NUM (29) 117#define GLOB_SBD_IRQ_NUM (29)
116#define GLOB_VERSION "driver version 20091110"
117 118
118#define GLOB_SBD_IOCTL_GC (0x7701) 119#define GLOB_SBD_IOCTL_GC (0x7701)
119#define GLOB_SBD_IOCTL_WL (0x7702) 120#define GLOB_SBD_IOCTL_WL (0x7702)
@@ -272,13 +273,6 @@ static int get_res_blk_num_os(void)
272 return res_blks; 273 return res_blks;
273} 274}
274 275
275static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
276{
277 rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
278 /* rq->timeout = 5 * HZ; */
279 rq->cmd[0] = REQ_LB_OP_FLUSH;
280}
281
282/* Transfer a full request. */ 276/* Transfer a full request. */
283static int do_transfer(struct spectra_nand_dev *tr, struct request *req) 277static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
284{ 278{
@@ -296,8 +290,7 @@ static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
296 IdentifyDeviceData.PagesPerBlock * 290 IdentifyDeviceData.PagesPerBlock *
297 res_blks_os; 291 res_blks_os;
298 292
299 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 293 if (req->cmd_type & REQ_FLUSH) {
300 req->cmd[0] == REQ_LB_OP_FLUSH) {
301 if (force_flush_cache()) /* Fail to flush cache */ 294 if (force_flush_cache()) /* Fail to flush cache */
302 return -EIO; 295 return -EIO;
303 else 296 else
@@ -597,11 +590,23 @@ int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
597 return -ENOTTY; 590 return -ENOTTY;
598} 591}
599 592
593int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
594 unsigned int cmd, unsigned long arg)
595{
596 int ret;
597
598 lock_kernel();
599 ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
600 unlock_kernel();
601
602 return ret;
603}
604
600static struct block_device_operations GLOB_SBD_ops = { 605static struct block_device_operations GLOB_SBD_ops = {
601 .owner = THIS_MODULE, 606 .owner = THIS_MODULE,
602 .open = GLOB_SBD_open, 607 .open = GLOB_SBD_open,
603 .release = GLOB_SBD_release, 608 .release = GLOB_SBD_release,
604 .locked_ioctl = GLOB_SBD_ioctl, 609 .ioctl = GLOB_SBD_unlocked_ioctl,
605 .getgeo = GLOB_SBD_getgeo, 610 .getgeo = GLOB_SBD_getgeo,
606}; 611};
607 612
@@ -650,8 +655,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
650 /* Here we force report 512 byte hardware sector size to Kernel */ 655 /* Here we force report 512 byte hardware sector size to Kernel */
651 blk_queue_logical_block_size(dev->queue, 512); 656 blk_queue_logical_block_size(dev->queue, 512);
652 657
653 blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH, 658 blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH);
654 SBD_prepare_flush);
655 659
656 dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd"); 660 dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
657 if (IS_ERR(dev->thread)) { 661 if (IS_ERR(dev->thread)) {
diff --git a/drivers/staging/spectra/flash.c b/drivers/staging/spectra/flash.c
index 134aa5166a8d..9b5218b6ada8 100644
--- a/drivers/staging/spectra/flash.c
+++ b/drivers/staging/spectra/flash.c
@@ -61,7 +61,6 @@ static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
61static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr, 61static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
62 u8 cache_blk, u16 flag); 62 u8 cache_blk, u16 flag);
63static int FTL_Cache_Write(void); 63static int FTL_Cache_Write(void);
64static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr);
65static void FTL_Calculate_LRU(void); 64static void FTL_Calculate_LRU(void);
66static u32 FTL_Get_Block_Index(u32 wBlockNum); 65static u32 FTL_Get_Block_Index(u32 wBlockNum);
67 66
@@ -86,8 +85,6 @@ static u32 FTL_Replace_MWBlock(void);
86static int FTL_Replace_Block(u64 blk_addr); 85static int FTL_Replace_Block(u64 blk_addr);
87static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX); 86static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
88 87
89static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr);
90
91struct device_info_tag DeviceInfo; 88struct device_info_tag DeviceInfo;
92struct flash_cache_tag Cache; 89struct flash_cache_tag Cache;
93static struct spectra_l2_cache_info cache_l2; 90static struct spectra_l2_cache_info cache_l2;
@@ -775,7 +772,7 @@ static void dump_cache_l2_table(void)
775{ 772{
776 struct list_head *p; 773 struct list_head *p;
777 struct spectra_l2_cache_list *pnd; 774 struct spectra_l2_cache_list *pnd;
778 int n, i; 775 int n;
779 776
780 n = 0; 777 n = 0;
781 list_for_each(p, &cache_l2.table.list) { 778 list_for_each(p, &cache_l2.table.list) {
@@ -1538,79 +1535,6 @@ static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
1538} 1535}
1539 1536
1540/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& 1537/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1541* Function: FTL_Cache_Update_Block
1542* Inputs: pointer to buffer,page address,block address
1543* Outputs: PASS=0 / FAIL=1
1544* Description: It updates the cache
1545*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1546static int FTL_Cache_Update_Block(u8 *pData,
1547 u64 old_page_addr, u64 blk_addr)
1548{
1549 int i, j;
1550 u8 *buf = pData;
1551 int wResult = PASS;
1552 int wFoundInCache;
1553 u64 page_addr;
1554 u64 addr;
1555 u64 old_blk_addr;
1556 u16 page_offset;
1557
1558 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1559 __FILE__, __LINE__, __func__);
1560
1561 old_blk_addr = (u64)(old_page_addr >>
1562 DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize;
1563 page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >>
1564 DeviceInfo.nBitsInPageDataSize);
1565
1566 for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
1567 page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize;
1568 if (i != page_offset) {
1569 wFoundInCache = FAIL;
1570 for (j = 0; j < CACHE_ITEM_NUM; j++) {
1571 addr = Cache.array[j].address;
1572 addr = FTL_Get_Physical_Block_Addr(addr) +
1573 GLOB_u64_Remainder(addr, 2);
1574 if ((addr >= page_addr) && addr <
1575 (page_addr + Cache.cache_item_size)) {
1576 wFoundInCache = PASS;
1577 buf = Cache.array[j].buf;
1578 Cache.array[j].changed = SET;
1579#if CMD_DMA
1580#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1581 int_cache[ftl_cmd_cnt].item = j;
1582 int_cache[ftl_cmd_cnt].cache.address =
1583 Cache.array[j].address;
1584 int_cache[ftl_cmd_cnt].cache.changed =
1585 Cache.array[j].changed;
1586#endif
1587#endif
1588 break;
1589 }
1590 }
1591 if (FAIL == wFoundInCache) {
1592 if (ERR == FTL_Cache_Read_All(g_pTempBuf,
1593 page_addr)) {
1594 wResult = FAIL;
1595 break;
1596 }
1597 buf = g_pTempBuf;
1598 }
1599 } else {
1600 buf = pData;
1601 }
1602
1603 if (FAIL == FTL_Cache_Write_All(buf,
1604 blk_addr + (page_addr - old_blk_addr))) {
1605 wResult = FAIL;
1606 break;
1607 }
1608 }
1609
1610 return wResult;
1611}
1612
1613/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1614* Function: FTL_Copy_Block 1538* Function: FTL_Copy_Block
1615* Inputs: source block address 1539* Inputs: source block address
1616* Destination block address 1540* Destination block address
@@ -1698,7 +1622,7 @@ static int get_l2_cache_blks(void)
1698static int erase_l2_cache_blocks(void) 1622static int erase_l2_cache_blocks(void)
1699{ 1623{
1700 int i, ret = PASS; 1624 int i, ret = PASS;
1701 u32 pblk, lblk; 1625 u32 pblk, lblk = BAD_BLOCK;
1702 u64 addr; 1626 u64 addr;
1703 u32 *pbt = (u32 *)g_pBlockTable; 1627 u32 *pbt = (u32 *)g_pBlockTable;
1704 1628
@@ -2004,87 +1928,6 @@ static int search_l2_cache(u8 *buf, u64 logical_addr)
2004 return ret; 1928 return ret;
2005} 1929}
2006 1930
2007/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2008* Function: FTL_Cache_Write_Back
2009* Inputs: pointer to data cached in sys memory
2010* address of free block in flash
2011* Outputs: PASS=0 / FAIL=1
2012* Description: writes all the pages of Cache Block to flash
2013*
2014*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2015static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr)
2016{
2017 int i, j, iErase;
2018 u64 old_page_addr, addr, phy_addr;
2019 u32 *pbt = (u32 *)g_pBlockTable;
2020 u32 lba;
2021
2022 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2023 __FILE__, __LINE__, __func__);
2024
2025 old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) +
2026 GLOB_u64_Remainder(blk_addr, 2);
2027
2028 iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? PASS : FAIL;
2029
2030 pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK);
2031
2032#if CMD_DMA
2033 p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
2034 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2035
2036 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
2037 p_BTableChangesDelta->BT_Index = (u32)(blk_addr >>
2038 DeviceInfo.nBitsInBlockDataSize);
2039 p_BTableChangesDelta->BT_Entry_Value =
2040 pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)];
2041 p_BTableChangesDelta->ValidFields = 0x0C;
2042#endif
2043
2044 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
2045 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
2046 FTL_Write_IN_Progress_Block_Table_Page();
2047 }
2048
2049 for (i = 0; i < RETRY_TIMES; i++) {
2050 if (PASS == iErase) {
2051 phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
2052 if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
2053 lba = BLK_FROM_ADDR(blk_addr);
2054 MARK_BLOCK_AS_BAD(pbt[lba]);
2055 i = RETRY_TIMES;
2056 break;
2057 }
2058 }
2059
2060 for (j = 0; j < CACHE_ITEM_NUM; j++) {
2061 addr = Cache.array[j].address;
2062 if ((addr <= blk_addr) &&
2063 ((addr + Cache.cache_item_size) > blk_addr))
2064 cache_block_to_write = j;
2065 }
2066
2067 phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
2068 if (PASS == FTL_Cache_Update_Block(pData,
2069 old_page_addr, phy_addr)) {
2070 cache_block_to_write = UNHIT_CACHE_ITEM;
2071 break;
2072 } else {
2073 iErase = PASS;
2074 }
2075 }
2076
2077 if (i >= RETRY_TIMES) {
2078 if (ERR == FTL_Flash_Error_Handle(pData,
2079 old_page_addr, blk_addr))
2080 return ERR;
2081 else
2082 return FAIL;
2083 }
2084
2085 return PASS;
2086}
2087
2088/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& 1931/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2089* Function: FTL_Cache_Write_Page 1932* Function: FTL_Cache_Write_Page
2090* Inputs: Pointer to buffer, page address, cache block number 1933* Inputs: Pointer to buffer, page address, cache block number
@@ -2370,159 +2213,6 @@ static int FTL_Write_Block_Table(int wForce)
2370 return 1; 2213 return 1;
2371} 2214}
2372 2215
2373/******************************************************************
2374* Function: GLOB_FTL_Flash_Format
2375* Inputs: none
2376* Outputs: PASS
2377* Description: The block table stores bad block info, including MDF+
2378* blocks gone bad over the ages. Therefore, if we have a
2379* block table in place, then use it to scan for bad blocks
2380* If not, then scan for MDF.
2381* Now, a block table will only be found if spectra was already
2382* being used. For a fresh flash, we'll go thru scanning for
2383* MDF. If spectra was being used, then there is a chance that
2384* the MDF has been corrupted. Spectra avoids writing to the
2385* first 2 bytes of the spare area to all pages in a block. This
2386* covers all known flash devices. However, since flash
2387* manufacturers have no standard of where the MDF is stored,
2388* this cannot guarantee that the MDF is protected for future
2389* devices too. The initial scanning for the block table assures
2390* this. It is ok even if the block table is outdated, as all
2391* we're looking for are bad block markers.
2392* Use this when mounting a file system or starting a
2393* new flash.
2394*
2395*********************************************************************/
2396static int FTL_Format_Flash(u8 valid_block_table)
2397{
2398 u32 i, j;
2399 u32 *pbt = (u32 *)g_pBlockTable;
2400 u32 tempNode;
2401 int ret;
2402
2403#if CMD_DMA
2404 u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy;
2405 if (ftl_cmd_cnt)
2406 return FAIL;
2407#endif
2408
2409 if (FAIL == FTL_Check_Block_Table(FAIL))
2410 valid_block_table = 0;
2411
2412 if (valid_block_table) {
2413 u8 switched = 1;
2414 u32 block, k;
2415
2416 k = DeviceInfo.wSpectraStartBlock;
2417 while (switched && (k < DeviceInfo.wSpectraEndBlock)) {
2418 switched = 0;
2419 k++;
2420 for (j = DeviceInfo.wSpectraStartBlock, i = 0;
2421 j <= DeviceInfo.wSpectraEndBlock;
2422 j++, i++) {
2423 block = (pbt[i] & ~BAD_BLOCK) -
2424 DeviceInfo.wSpectraStartBlock;
2425 if (block != i) {
2426 switched = 1;
2427 tempNode = pbt[i];
2428 pbt[i] = pbt[block];
2429 pbt[block] = tempNode;
2430 }
2431 }
2432 }
2433 if ((k == DeviceInfo.wSpectraEndBlock) && switched)
2434 valid_block_table = 0;
2435 }
2436
2437 if (!valid_block_table) {
2438 memset(g_pBlockTable, 0,
2439 DeviceInfo.wDataBlockNum * sizeof(u32));
2440 memset(g_pWearCounter, 0,
2441 DeviceInfo.wDataBlockNum * sizeof(u8));
2442 if (DeviceInfo.MLCDevice)
2443 memset(g_pReadCounter, 0,
2444 DeviceInfo.wDataBlockNum * sizeof(u16));
2445#if CMD_DMA
2446 memset(g_pBTStartingCopy, 0,
2447 DeviceInfo.wDataBlockNum * sizeof(u32));
2448 memset(g_pWearCounterCopy, 0,
2449 DeviceInfo.wDataBlockNum * sizeof(u8));
2450 if (DeviceInfo.MLCDevice)
2451 memset(g_pReadCounterCopy, 0,
2452 DeviceInfo.wDataBlockNum * sizeof(u16));
2453#endif
2454 for (j = DeviceInfo.wSpectraStartBlock, i = 0;
2455 j <= DeviceInfo.wSpectraEndBlock;
2456 j++, i++) {
2457 if (GLOB_LLD_Get_Bad_Block((u32)j))
2458 pbt[i] = (u32)(BAD_BLOCK | j);
2459 }
2460 }
2461
2462 nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n");
2463
2464 for (j = DeviceInfo.wSpectraStartBlock, i = 0;
2465 j <= DeviceInfo.wSpectraEndBlock;
2466 j++, i++) {
2467 if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) {
2468 ret = GLOB_LLD_Erase_Block(j);
2469 if (FAIL == ret) {
2470 pbt[i] = (u32)(j);
2471 MARK_BLOCK_AS_BAD(pbt[i]);
2472 nand_dbg_print(NAND_DBG_WARN,
2473 "NAND Program fail in %s, Line %d, "
2474 "Function: %s, new Bad Block %d generated!\n",
2475 __FILE__, __LINE__, __func__, (int)j);
2476 } else {
2477 pbt[i] = (u32)(SPARE_BLOCK | j);
2478 }
2479 }
2480#if CMD_DMA
2481 pbtStartingCopy[i] = pbt[i];
2482#endif
2483 }
2484
2485 g_wBlockTableOffset = 0;
2486 for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock -
2487 DeviceInfo.wSpectraStartBlock))
2488 && ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++)
2489 ;
2490 if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) {
2491 printk(KERN_ERR "All blocks bad!\n");
2492 return FAIL;
2493 } else {
2494 g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK;
2495 if (i != BLOCK_TABLE_INDEX) {
2496 tempNode = pbt[i];
2497 pbt[i] = pbt[BLOCK_TABLE_INDEX];
2498 pbt[BLOCK_TABLE_INDEX] = tempNode;
2499 }
2500 }
2501 pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
2502
2503#if CMD_DMA
2504 pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
2505#endif
2506
2507 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
2508 memset(g_pBTBlocks, 0xFF,
2509 (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32));
2510 g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex;
2511 FTL_Write_Block_Table(FAIL);
2512
2513 for (i = 0; i < CACHE_ITEM_NUM; i++) {
2514 Cache.array[i].address = NAND_CACHE_INIT_ADDR;
2515 Cache.array[i].use_cnt = 0;
2516 Cache.array[i].changed = CLEAR;
2517 }
2518
2519#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
2520 memcpy((void *)&cache_start_copy, (void *)&Cache,
2521 sizeof(struct flash_cache_tag));
2522#endif
2523 return PASS;
2524}
2525
2526static int force_format_nand(void) 2216static int force_format_nand(void)
2527{ 2217{
2528 u32 i; 2218 u32 i;
@@ -3031,112 +2721,6 @@ static int FTL_Read_Block_Table(void)
3031 return wResult; 2721 return wResult;
3032} 2722}
3033 2723
3034
3035/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3036* Function: FTL_Flash_Error_Handle
3037* Inputs: Pointer to data
3038* Page address
3039* Block address
3040* Outputs: PASS=0 / FAIL=1
3041 * Description: It handles any error that occurs during Spectra operation
3042*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3043static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr,
3044 u64 blk_addr)
3045{
3046 u32 i;
3047 int j;
3048 u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr);
3049 u64 phy_addr;
3050 int wErase = FAIL;
3051 int wResult = FAIL;
3052 u32 *pbt = (u32 *)g_pBlockTable;
3053
3054 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3055 __FILE__, __LINE__, __func__);
3056
3057 if (ERR == GLOB_FTL_Garbage_Collection())
3058 return ERR;
3059
3060 do {
3061 for (i = DeviceInfo.wSpectraEndBlock -
3062 DeviceInfo.wSpectraStartBlock;
3063 i > 0; i--) {
3064 if (IS_SPARE_BLOCK(i)) {
3065 tmp_node = (u32)(BAD_BLOCK |
3066 pbt[blk_node]);
3067 pbt[blk_node] = (u32)(pbt[i] &
3068 (~SPARE_BLOCK));
3069 pbt[i] = tmp_node;
3070#if CMD_DMA
3071 p_BTableChangesDelta =
3072 (struct BTableChangesDelta *)
3073 g_pBTDelta_Free;
3074 g_pBTDelta_Free +=
3075 sizeof(struct BTableChangesDelta);
3076
3077 p_BTableChangesDelta->ftl_cmd_cnt =
3078 ftl_cmd_cnt;
3079 p_BTableChangesDelta->BT_Index =
3080 blk_node;
3081 p_BTableChangesDelta->BT_Entry_Value =
3082 pbt[blk_node];
3083 p_BTableChangesDelta->ValidFields = 0x0C;
3084
3085 p_BTableChangesDelta =
3086 (struct BTableChangesDelta *)
3087 g_pBTDelta_Free;
3088 g_pBTDelta_Free +=
3089 sizeof(struct BTableChangesDelta);
3090
3091 p_BTableChangesDelta->ftl_cmd_cnt =
3092 ftl_cmd_cnt;
3093 p_BTableChangesDelta->BT_Index = i;
3094 p_BTableChangesDelta->BT_Entry_Value = pbt[i];
3095 p_BTableChangesDelta->ValidFields = 0x0C;
3096#endif
3097 wResult = PASS;
3098 break;
3099 }
3100 }
3101
3102 if (FAIL == wResult) {
3103 if (FAIL == GLOB_FTL_Garbage_Collection())
3104 break;
3105 else
3106 continue;
3107 }
3108
3109 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
3110 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
3111 FTL_Write_IN_Progress_Block_Table_Page();
3112 }
3113
3114 phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
3115
3116 for (j = 0; j < RETRY_TIMES; j++) {
3117 if (PASS == wErase) {
3118 if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
3119 MARK_BLOCK_AS_BAD(pbt[blk_node]);
3120 break;
3121 }
3122 }
3123 if (PASS == FTL_Cache_Update_Block(pData,
3124 old_page_addr,
3125 phy_addr)) {
3126 wResult = PASS;
3127 break;
3128 } else {
3129 wResult = FAIL;
3130 wErase = PASS;
3131 }
3132 }
3133 } while (FAIL == wResult);
3134
3135 FTL_Write_Block_Table(FAIL);
3136
3137 return wResult;
3138}
3139
3140/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& 2724/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3141* Function: FTL_Get_Page_Num 2725* Function: FTL_Get_Page_Num
3142* Inputs: Size in bytes 2726* Inputs: Size in bytes
diff --git a/drivers/staging/ti-st/st.h b/drivers/staging/ti-st/st.h
index 9952579425b9..1b3060eb2921 100644
--- a/drivers/staging/ti-st/st.h
+++ b/drivers/staging/ti-st/st.h
@@ -80,5 +80,4 @@ struct st_proto_s {
80extern long st_register(struct st_proto_s *); 80extern long st_register(struct st_proto_s *);
81extern long st_unregister(enum proto_type); 81extern long st_unregister(enum proto_type);
82 82
83extern struct platform_device *st_get_plat_device(void);
84#endif /* ST_H */ 83#endif /* ST_H */
diff --git a/drivers/staging/ti-st/st_core.c b/drivers/staging/ti-st/st_core.c
index 063c9b1db1ab..b85d8bfdf600 100644
--- a/drivers/staging/ti-st/st_core.c
+++ b/drivers/staging/ti-st/st_core.c
@@ -38,7 +38,6 @@
38#include "st_ll.h" 38#include "st_ll.h"
39#include "st.h" 39#include "st.h"
40 40
41#define VERBOSE
42/* strings to be used for rfkill entries and by 41/* strings to be used for rfkill entries and by
43 * ST Core to be used for sysfs debug entry 42 * ST Core to be used for sysfs debug entry
44 */ 43 */
@@ -581,7 +580,7 @@ long st_register(struct st_proto_s *new_proto)
581 long err = 0; 580 long err = 0;
582 unsigned long flags = 0; 581 unsigned long flags = 0;
583 582
584 st_kim_ref(&st_gdata); 583 st_kim_ref(&st_gdata, 0);
585 pr_info("%s(%d) ", __func__, new_proto->type); 584 pr_info("%s(%d) ", __func__, new_proto->type);
586 if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL 585 if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
587 || new_proto->reg_complete_cb == NULL) { 586 || new_proto->reg_complete_cb == NULL) {
@@ -713,7 +712,7 @@ long st_unregister(enum proto_type type)
713 712
714 pr_debug("%s: %d ", __func__, type); 713 pr_debug("%s: %d ", __func__, type);
715 714
716 st_kim_ref(&st_gdata); 715 st_kim_ref(&st_gdata, 0);
717 if (type < ST_BT || type >= ST_MAX) { 716 if (type < ST_BT || type >= ST_MAX) {
718 pr_err(" protocol %d not supported", type); 717 pr_err(" protocol %d not supported", type);
719 return -EPROTONOSUPPORT; 718 return -EPROTONOSUPPORT;
@@ -767,7 +766,7 @@ long st_write(struct sk_buff *skb)
767#endif 766#endif
768 long len; 767 long len;
769 768
770 st_kim_ref(&st_gdata); 769 st_kim_ref(&st_gdata, 0);
771 if (unlikely(skb == NULL || st_gdata == NULL 770 if (unlikely(skb == NULL || st_gdata == NULL
772 || st_gdata->tty == NULL)) { 771 || st_gdata->tty == NULL)) {
773 pr_err("data/tty unavailable to perform write"); 772 pr_err("data/tty unavailable to perform write");
@@ -818,7 +817,7 @@ static int st_tty_open(struct tty_struct *tty)
818 struct st_data_s *st_gdata; 817 struct st_data_s *st_gdata;
819 pr_info("%s ", __func__); 818 pr_info("%s ", __func__);
820 819
821 st_kim_ref(&st_gdata); 820 st_kim_ref(&st_gdata, 0);
822 st_gdata->tty = tty; 821 st_gdata->tty = tty;
823 tty->disc_data = st_gdata; 822 tty->disc_data = st_gdata;
824 823
diff --git a/drivers/staging/ti-st/st_core.h b/drivers/staging/ti-st/st_core.h
index e0c32d149f5f..8601320a679e 100644
--- a/drivers/staging/ti-st/st_core.h
+++ b/drivers/staging/ti-st/st_core.h
@@ -117,7 +117,7 @@ int st_core_init(struct st_data_s **);
117void st_core_exit(struct st_data_s *); 117void st_core_exit(struct st_data_s *);
118 118
119/* ask for reference from KIM */ 119/* ask for reference from KIM */
120void st_kim_ref(struct st_data_s **); 120void st_kim_ref(struct st_data_s **, int);
121 121
122#define GPS_STUB_TEST 122#define GPS_STUB_TEST
123#ifdef GPS_STUB_TEST 123#ifdef GPS_STUB_TEST
diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/staging/ti-st/st_kim.c
index b4a6c7fdc4e6..9e99463f76e8 100644
--- a/drivers/staging/ti-st/st_kim.c
+++ b/drivers/staging/ti-st/st_kim.c
@@ -72,11 +72,26 @@ const unsigned char *protocol_names[] = {
72 PROTO_ENTRY(ST_GPS, "GPS"), 72 PROTO_ENTRY(ST_GPS, "GPS"),
73}; 73};
74 74
75#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
76struct platform_device *st_kim_devices[MAX_ST_DEVICES];
75 77
76/**********************************************************************/ 78/**********************************************************************/
77/* internal functions */ 79/* internal functions */
78 80
79/** 81/**
82 * st_get_plat_device -
83 * function which returns the reference to the platform device
84 * requested by id. As of now only 1 such device exists (id=0)
85 * the context requesting for reference can get the id to be
86 * requested by a. The protocol driver which is registering or
87 * b. the tty device which is opened.
88 */
89static struct platform_device *st_get_plat_device(int id)
90{
91 return st_kim_devices[id];
92}
93
94/**
80 * validate_firmware_response - 95 * validate_firmware_response -
81 * function to return whether the firmware response was proper 96 * function to return whether the firmware response was proper
82 * in case of error don't complete so that waiting for proper 97 * in case of error don't complete so that waiting for proper
@@ -353,7 +368,7 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state)
353 struct kim_data_s *kim_gdata; 368 struct kim_data_s *kim_gdata;
354 pr_info(" %s ", __func__); 369 pr_info(" %s ", __func__);
355 370
356 kim_pdev = st_get_plat_device(); 371 kim_pdev = st_get_plat_device(0);
357 kim_gdata = dev_get_drvdata(&kim_pdev->dev); 372 kim_gdata = dev_get_drvdata(&kim_pdev->dev);
358 373
359 if (kim_gdata->gpios[type] == -1) { 374 if (kim_gdata->gpios[type] == -1) {
@@ -574,12 +589,12 @@ static int kim_toggle_radio(void *data, bool blocked)
574 * This would enable multiple such platform devices to exist 589 * This would enable multiple such platform devices to exist
575 * on a given platform 590 * on a given platform
576 */ 591 */
577void st_kim_ref(struct st_data_s **core_data) 592void st_kim_ref(struct st_data_s **core_data, int id)
578{ 593{
579 struct platform_device *pdev; 594 struct platform_device *pdev;
580 struct kim_data_s *kim_gdata; 595 struct kim_data_s *kim_gdata;
581 /* get kim_gdata reference from platform device */ 596 /* get kim_gdata reference from platform device */
582 pdev = st_get_plat_device(); 597 pdev = st_get_plat_device(id);
583 kim_gdata = dev_get_drvdata(&pdev->dev); 598 kim_gdata = dev_get_drvdata(&pdev->dev);
584 *core_data = kim_gdata->core_data; 599 *core_data = kim_gdata->core_data;
585} 600}
@@ -623,6 +638,7 @@ static int kim_probe(struct platform_device *pdev)
623 long *gpios = pdev->dev.platform_data; 638 long *gpios = pdev->dev.platform_data;
624 struct kim_data_s *kim_gdata; 639 struct kim_data_s *kim_gdata;
625 640
641 st_kim_devices[pdev->id] = pdev;
626 kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); 642 kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
627 if (!kim_gdata) { 643 if (!kim_gdata) {
628 pr_err("no mem to allocate"); 644 pr_err("no mem to allocate");
diff --git a/drivers/staging/tm6000/Kconfig b/drivers/staging/tm6000/Kconfig
index c725356cc346..de7ebb99d8f6 100644
--- a/drivers/staging/tm6000/Kconfig
+++ b/drivers/staging/tm6000/Kconfig
@@ -1,6 +1,6 @@
1config VIDEO_TM6000 1config VIDEO_TM6000
2 tristate "TV Master TM5600/6000/6010 driver" 2 tristate "TV Master TM5600/6000/6010 driver"
3 depends on VIDEO_DEV && I2C && INPUT && USB && EXPERIMENTAL 3 depends on VIDEO_DEV && I2C && INPUT && IR_CORE && USB && EXPERIMENTAL
4 select VIDEO_TUNER 4 select VIDEO_TUNER
5 select MEDIA_TUNER_XC2028 5 select MEDIA_TUNER_XC2028
6 select MEDIA_TUNER_XC5000 6 select MEDIA_TUNER_XC5000
diff --git a/drivers/staging/tm6000/tm6000-input.c b/drivers/staging/tm6000/tm6000-input.c
index 32f7a0af6938..54f7667cc706 100644
--- a/drivers/staging/tm6000/tm6000-input.c
+++ b/drivers/staging/tm6000/tm6000-input.c
@@ -46,7 +46,7 @@ MODULE_PARM_DESC(enable_ir, "enable ir (default is enable");
46 } 46 }
47 47
48struct tm6000_ir_poll_result { 48struct tm6000_ir_poll_result {
49 u8 rc_data[4]; 49 u16 rc_data;
50}; 50};
51 51
52struct tm6000_IR { 52struct tm6000_IR {
@@ -60,9 +60,9 @@ struct tm6000_IR {
60 int polling; 60 int polling;
61 struct delayed_work work; 61 struct delayed_work work;
62 u8 wait:1; 62 u8 wait:1;
63 u8 key:1;
63 struct urb *int_urb; 64 struct urb *int_urb;
64 u8 *urb_data; 65 u8 *urb_data;
65 u8 key:1;
66 66
67 int (*get_key) (struct tm6000_IR *, struct tm6000_ir_poll_result *); 67 int (*get_key) (struct tm6000_IR *, struct tm6000_ir_poll_result *);
68 68
@@ -122,13 +122,14 @@ static void tm6000_ir_urb_received(struct urb *urb)
122 122
123 if (urb->status != 0) 123 if (urb->status != 0)
124 printk(KERN_INFO "not ready\n"); 124 printk(KERN_INFO "not ready\n");
125 else if (urb->actual_length > 0) 125 else if (urb->actual_length > 0) {
126 memcpy(ir->urb_data, urb->transfer_buffer, urb->actual_length); 126 memcpy(ir->urb_data, urb->transfer_buffer, urb->actual_length);
127 127
128 dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0], 128 dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0],
129 ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]); 129 ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]);
130 130
131 ir->key = 1; 131 ir->key = 1;
132 }
132 133
133 rc = usb_submit_urb(urb, GFP_ATOMIC); 134 rc = usb_submit_urb(urb, GFP_ATOMIC);
134} 135}
@@ -140,30 +141,47 @@ static int default_polling_getkey(struct tm6000_IR *ir,
140 int rc; 141 int rc;
141 u8 buf[2]; 142 u8 buf[2];
142 143
143 if (ir->wait && !&dev->int_in) { 144 if (ir->wait && !&dev->int_in)
144 poll_result->rc_data[0] = 0xff;
145 return 0; 145 return 0;
146 }
147 146
148 if (&dev->int_in) { 147 if (&dev->int_in) {
149 poll_result->rc_data[0] = ir->urb_data[0]; 148 if (ir->ir.ir_type == IR_TYPE_RC5)
150 poll_result->rc_data[1] = ir->urb_data[1]; 149 poll_result->rc_data = ir->urb_data[0];
150 else
151 poll_result->rc_data = ir->urb_data[0] | ir->urb_data[1] << 8;
151 } else { 152 } else {
152 tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 0); 153 tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 0);
153 msleep(10); 154 msleep(10);
154 tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 1); 155 tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 1);
155 msleep(10); 156 msleep(10);
156 157
157 rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | 158 if (ir->ir.ir_type == IR_TYPE_RC5) {
158 USB_RECIP_DEVICE, REQ_02_GET_IR_CODE, 0, 0, buf, 1); 159 rc = tm6000_read_write_usb(dev, USB_DIR_IN |
160 USB_TYPE_VENDOR | USB_RECIP_DEVICE,
161 REQ_02_GET_IR_CODE, 0, 0, buf, 1);
159 162
160 msleep(10); 163 msleep(10);
161 164
162 dprintk("read data=%02x\n", buf[0]); 165 dprintk("read data=%02x\n", buf[0]);
163 if (rc < 0) 166 if (rc < 0)
164 return rc; 167 return rc;
165 168
166 poll_result->rc_data[0] = buf[0]; 169 poll_result->rc_data = buf[0];
170 } else {
171 rc = tm6000_read_write_usb(dev, USB_DIR_IN |
172 USB_TYPE_VENDOR | USB_RECIP_DEVICE,
173 REQ_02_GET_IR_CODE, 0, 0, buf, 2);
174
175 msleep(10);
176
177 dprintk("read data=%04x\n", buf[0] | buf[1] << 8);
178 if (rc < 0)
179 return rc;
180
181 poll_result->rc_data = buf[0] | buf[1] << 8;
182 }
183 if ((poll_result->rc_data & 0x00ff) != 0xff)
184 ir->key = 1;
167 } 185 }
168 return 0; 186 return 0;
169} 187}
@@ -180,12 +198,11 @@ static void tm6000_ir_handle_key(struct tm6000_IR *ir)
180 return; 198 return;
181 } 199 }
182 200
183 dprintk("ir->get_key result data=%02x %02x\n", 201 dprintk("ir->get_key result data=%04x\n", poll_result.rc_data);
184 poll_result.rc_data[0], poll_result.rc_data[1]);
185 202
186 if (poll_result.rc_data[0] != 0xff && ir->key == 1) { 203 if (ir->key) {
187 ir_input_keydown(ir->input->input_dev, &ir->ir, 204 ir_input_keydown(ir->input->input_dev, &ir->ir,
188 poll_result.rc_data[0] | poll_result.rc_data[1] << 8); 205 (u32)poll_result.rc_data);
189 206
190 ir_input_nokey(ir->input->input_dev, &ir->ir); 207 ir_input_nokey(ir->input->input_dev, &ir->ir);
191 ir->key = 0; 208 ir->key = 0;
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 0142338bcafe..4bdb8362de82 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -766,9 +766,14 @@ static int wpa_set_associate(PSDevice pDevice,
766 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len); 766 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
767 767
768 768
769 if (param->u.wpa_associate.wpa_ie && 769 if (param->u.wpa_associate.wpa_ie_len) {
770 copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len)) 770 if (!param->u.wpa_associate.wpa_ie)
771 return -EINVAL; 771 return -EINVAL;
772 if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
773 return -EINVAL;
774 if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
775 return -EFAULT;
776 }
772 777
773 if (param->u.wpa_associate.mode == 1) 778 if (param->u.wpa_associate.mode == 1)
774 pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA; 779 pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 368c30a9d5ff..4af83d5318f2 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -219,6 +219,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
219 return -ENOENT; 219 return -ENOENT;
220 params.key_len = len; 220 params.key_len = len;
221 params.key = wlandev->wep_keys[key_index]; 221 params.key = wlandev->wep_keys[key_index];
222 params.seq_len = 0;
222 223
223 callback(cookie, &params); 224 callback(cookie, &params);
224 225
@@ -735,6 +736,8 @@ struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
735 priv->band.n_channels = ARRAY_SIZE(prism2_channels); 736 priv->band.n_channels = ARRAY_SIZE(prism2_channels);
736 priv->band.bitrates = priv->rates; 737 priv->band.bitrates = priv->rates;
737 priv->band.n_bitrates = ARRAY_SIZE(prism2_rates); 738 priv->band.n_bitrates = ARRAY_SIZE(prism2_rates);
739 priv->band.band = IEEE80211_BAND_2GHZ;
740 priv->band.ht_cap.ht_supported = false;
738 wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 741 wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
739 742
740 set_wiphy_dev(wiphy, dev); 743 set_wiphy_dev(wiphy, dev);
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 77d4d715a789..722c840ac638 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -769,6 +769,7 @@ static int __init zram_init(void)
769free_devices: 769free_devices:
770 while (dev_id) 770 while (dev_id)
771 destroy_device(&devices[--dev_id]); 771 destroy_device(&devices[--dev_id]);
772 kfree(devices);
772unregister: 773unregister:
773 unregister_blkdev(zram_major, "zram"); 774 unregister_blkdev(zram_major, "zram");
774out: 775out:
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 593fc5e2d2e6..5af23cc5ea9f 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1127,6 +1127,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
1127{ 1127{
1128 struct cxacru_data *instance; 1128 struct cxacru_data *instance;
1129 struct usb_device *usb_dev = interface_to_usbdev(intf); 1129 struct usb_device *usb_dev = interface_to_usbdev(intf);
1130 struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
1130 int ret; 1131 int ret;
1131 1132
1132 /* instance init */ 1133 /* instance init */
@@ -1171,15 +1172,34 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
1171 goto fail; 1172 goto fail;
1172 } 1173 }
1173 1174
1174 usb_fill_int_urb(instance->rcv_urb, 1175 if (!cmd_ep) {
1176 dbg("cxacru_bind: no command endpoint");
1177 ret = -ENODEV;
1178 goto fail;
1179 }
1180
1181 if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
1182 == USB_ENDPOINT_XFER_INT) {
1183 usb_fill_int_urb(instance->rcv_urb,
1175 usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD), 1184 usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
1176 instance->rcv_buf, PAGE_SIZE, 1185 instance->rcv_buf, PAGE_SIZE,
1177 cxacru_blocking_completion, &instance->rcv_done, 1); 1186 cxacru_blocking_completion, &instance->rcv_done, 1);
1178 1187
1179 usb_fill_int_urb(instance->snd_urb, 1188 usb_fill_int_urb(instance->snd_urb,
1180 usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD), 1189 usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD),
1181 instance->snd_buf, PAGE_SIZE, 1190 instance->snd_buf, PAGE_SIZE,
1182 cxacru_blocking_completion, &instance->snd_done, 4); 1191 cxacru_blocking_completion, &instance->snd_done, 4);
1192 } else {
1193 usb_fill_bulk_urb(instance->rcv_urb,
1194 usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD),
1195 instance->rcv_buf, PAGE_SIZE,
1196 cxacru_blocking_completion, &instance->rcv_done);
1197
1198 usb_fill_bulk_urb(instance->snd_urb,
1199 usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD),
1200 instance->snd_buf, PAGE_SIZE,
1201 cxacru_blocking_completion, &instance->snd_done);
1202 }
1183 1203
1184 mutex_init(&instance->cm_serialize); 1204 mutex_init(&instance->cm_serialize);
1185 1205
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 1833b3a71515..bc62fae0680f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -965,7 +965,8 @@ static int acm_probe(struct usb_interface *intf,
965 } 965 }
966 966
967 if (!buflen) { 967 if (!buflen) {
968 if (intf->cur_altsetting->endpoint->extralen && 968 if (intf->cur_altsetting->endpoint &&
969 intf->cur_altsetting->endpoint->extralen &&
969 intf->cur_altsetting->endpoint->extra) { 970 intf->cur_altsetting->endpoint->extra) {
970 dev_dbg(&intf->dev, 971 dev_dbg(&intf->dev,
971 "Seeking extra descriptors on endpoint\n"); 972 "Seeking extra descriptors on endpoint\n");
@@ -1481,6 +1482,11 @@ static int acm_reset_resume(struct usb_interface *intf)
1481 USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ 1482 USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
1482 USB_CDC_ACM_PROTO_VENDOR) 1483 USB_CDC_ACM_PROTO_VENDOR)
1483 1484
1485#define SAMSUNG_PCSUITE_ACM_INFO(x) \
1486 USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \
1487 USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
1488 USB_CDC_ACM_PROTO_VENDOR)
1489
1484/* 1490/*
1485 * USB driver structure. 1491 * USB driver structure.
1486 */ 1492 */
@@ -1591,6 +1597,17 @@ static const struct usb_device_id acm_ids[] = {
1591 { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */ 1597 { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
1592 { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */ 1598 { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
1593 { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */ 1599 { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
1600 { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */
1601 { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */
1602 { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */
1603 { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */
1604 { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */
1605 { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */
1606 { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */
1607 { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
1608 { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
1609 { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
1610 { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
1594 1611
1595 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ 1612 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
1596 1613
@@ -1599,6 +1616,10 @@ static const struct usb_device_id acm_ids[] = {
1599 .driver_info = NOT_A_MODEM, 1616 .driver_info = NOT_A_MODEM,
1600 }, 1617 },
1601 1618
1619 /* control interfaces without any protocol set */
1620 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1621 USB_CDC_PROTO_NONE) },
1622
1602 /* control interfaces with various AT-command sets */ 1623 /* control interfaces with various AT-command sets */
1603 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1624 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1604 USB_CDC_ACM_PROTO_AT_V25TER) }, 1625 USB_CDC_ACM_PROTO_AT_V25TER) },
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 7e594449600e..9eed5b52d9de 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS
91 If you are unsure about this, say N here. 91 If you are unsure about this, say N here.
92 92
93config USB_SUSPEND 93config USB_SUSPEND
94 bool "USB runtime power management (suspend/resume and wakeup)" 94 bool "USB runtime power management (autosuspend) and wakeup"
95 depends on USB && PM_RUNTIME 95 depends on USB && PM_RUNTIME
96 help 96 help
97 If you say Y here, you can use driver calls or the sysfs 97 If you say Y here, you can use driver calls or the sysfs
98 "power/level" file to suspend or resume individual USB 98 "power/control" file to enable or disable autosuspend for
99 peripherals and to enable or disable autosuspend (see 99 individual USB peripherals (see
100 Documentation/usb/power-management.txt for more details). 100 Documentation/usb/power-management.txt for more details).
101 101
102 Also, USB "remote wakeup" signaling is supported, whereby some 102 Also, USB "remote wakeup" signaling is supported, whereby some
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index f06f5dbc8cdc..1e6ccef2cf0c 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -159,9 +159,9 @@ void usb_major_cleanup(void)
159int usb_register_dev(struct usb_interface *intf, 159int usb_register_dev(struct usb_interface *intf,
160 struct usb_class_driver *class_driver) 160 struct usb_class_driver *class_driver)
161{ 161{
162 int retval = -EINVAL; 162 int retval;
163 int minor_base = class_driver->minor_base; 163 int minor_base = class_driver->minor_base;
164 int minor = 0; 164 int minor;
165 char name[20]; 165 char name[20];
166 char *temp; 166 char *temp;
167 167
@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf,
173 */ 173 */
174 minor_base = 0; 174 minor_base = 0;
175#endif 175#endif
176 intf->minor = -1;
177
178 dbg ("looking for a minor, starting at %d", minor_base);
179 176
180 if (class_driver->fops == NULL) 177 if (class_driver->fops == NULL)
181 goto exit; 178 return -EINVAL;
179 if (intf->minor >= 0)
180 return -EADDRINUSE;
181
182 retval = init_usb_class();
183 if (retval)
184 return retval;
185
186 dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
182 187
183 down_write(&minor_rwsem); 188 down_write(&minor_rwsem);
184 for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { 189 for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf,
186 continue; 191 continue;
187 192
188 usb_minors[minor] = class_driver->fops; 193 usb_minors[minor] = class_driver->fops;
189 194 intf->minor = minor;
190 retval = 0;
191 break; 195 break;
192 } 196 }
193 up_write(&minor_rwsem); 197 up_write(&minor_rwsem);
194 198 if (intf->minor < 0)
195 if (retval) 199 return -EXFULL;
196 goto exit;
197
198 retval = init_usb_class();
199 if (retval)
200 goto exit;
201
202 intf->minor = minor;
203 200
204 /* create a usb class device for this usb interface */ 201 /* create a usb class device for this usb interface */
205 snprintf(name, sizeof(name), class_driver->name, minor - minor_base); 202 snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf,
213 "%s", temp); 210 "%s", temp);
214 if (IS_ERR(intf->usb_dev)) { 211 if (IS_ERR(intf->usb_dev)) {
215 down_write(&minor_rwsem); 212 down_write(&minor_rwsem);
216 usb_minors[intf->minor] = NULL; 213 usb_minors[minor] = NULL;
214 intf->minor = -1;
217 up_write(&minor_rwsem); 215 up_write(&minor_rwsem);
218 retval = PTR_ERR(intf->usb_dev); 216 retval = PTR_ERR(intf->usb_dev);
219 } 217 }
220exit:
221 return retval; 218 return retval;
222} 219}
223EXPORT_SYMBOL_GPL(usb_register_dev); 220EXPORT_SYMBOL_GPL(usb_register_dev);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index fd4c36ea5e46..9f0ce7de0e36 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1724,6 +1724,15 @@ free_interfaces:
1724 if (ret) 1724 if (ret)
1725 goto free_interfaces; 1725 goto free_interfaces;
1726 1726
1727 /* if it's already configured, clear out old state first.
1728 * getting rid of old interfaces means unbinding their drivers.
1729 */
1730 if (dev->state != USB_STATE_ADDRESS)
1731 usb_disable_device(dev, 1); /* Skip ep0 */
1732
1733 /* Get rid of pending async Set-Config requests for this device */
1734 cancel_async_set_config(dev);
1735
1727 /* Make sure we have bandwidth (and available HCD resources) for this 1736 /* Make sure we have bandwidth (and available HCD resources) for this
1728 * configuration. Remove endpoints from the schedule if we're dropping 1737 * configuration. Remove endpoints from the schedule if we're dropping
1729 * this configuration to set configuration 0. After this point, the 1738 * this configuration to set configuration 0. After this point, the
@@ -1733,20 +1742,11 @@ free_interfaces:
1733 mutex_lock(&hcd->bandwidth_mutex); 1742 mutex_lock(&hcd->bandwidth_mutex);
1734 ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); 1743 ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
1735 if (ret < 0) { 1744 if (ret < 0) {
1736 usb_autosuspend_device(dev);
1737 mutex_unlock(&hcd->bandwidth_mutex); 1745 mutex_unlock(&hcd->bandwidth_mutex);
1746 usb_autosuspend_device(dev);
1738 goto free_interfaces; 1747 goto free_interfaces;
1739 } 1748 }
1740 1749
1741 /* if it's already configured, clear out old state first.
1742 * getting rid of old interfaces means unbinding their drivers.
1743 */
1744 if (dev->state != USB_STATE_ADDRESS)
1745 usb_disable_device(dev, 1); /* Skip ep0 */
1746
1747 /* Get rid of pending async Set-Config requests for this device */
1748 cancel_async_set_config(dev);
1749
1750 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 1750 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1751 USB_REQ_SET_CONFIGURATION, 0, configuration, 0, 1751 USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
1752 NULL, 0, USB_CTRL_SET_TIMEOUT); 1752 NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -1761,8 +1761,8 @@ free_interfaces:
1761 if (!cp) { 1761 if (!cp) {
1762 usb_set_device_state(dev, USB_STATE_ADDRESS); 1762 usb_set_device_state(dev, USB_STATE_ADDRESS);
1763 usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); 1763 usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
1764 usb_autosuspend_device(dev);
1765 mutex_unlock(&hcd->bandwidth_mutex); 1764 mutex_unlock(&hcd->bandwidth_mutex);
1765 usb_autosuspend_device(dev);
1766 goto free_interfaces; 1766 goto free_interfaces;
1767 } 1767 }
1768 mutex_unlock(&hcd->bandwidth_mutex); 1768 mutex_unlock(&hcd->bandwidth_mutex);
@@ -1802,6 +1802,7 @@ free_interfaces:
1802 intf->dev.groups = usb_interface_groups; 1802 intf->dev.groups = usb_interface_groups;
1803 intf->dev.dma_mask = dev->dev.dma_mask; 1803 intf->dev.dma_mask = dev->dev.dma_mask;
1804 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); 1804 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
1805 intf->minor = -1;
1805 device_initialize(&intf->dev); 1806 device_initialize(&intf->dev);
1806 dev_set_name(&intf->dev, "%d-%s:%d.%d", 1807 dev_set_name(&intf->dev, "%d-%s:%d.%d",
1807 dev->bus->busnum, dev->devpath, 1808 dev->bus->busnum, dev->devpath,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index e483f80822d2..1160c55de7f2 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -723,12 +723,12 @@ int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
723 723
724/** 724/**
725 * usb_string_ids_n() - allocate unused string IDs in batch 725 * usb_string_ids_n() - allocate unused string IDs in batch
726 * @cdev: the device whose string descriptor IDs are being allocated 726 * @c: the device whose string descriptor IDs are being allocated
727 * @n: number of string IDs to allocate 727 * @n: number of string IDs to allocate
728 * Context: single threaded during gadget setup 728 * Context: single threaded during gadget setup
729 * 729 *
730 * Returns the first requested ID. This ID and next @n-1 IDs are now 730 * Returns the first requested ID. This ID and next @n-1 IDs are now
731 * valid IDs. At least providind that @n is non zore because if it 731 * valid IDs. At least provided that @n is non-zero because if it
732 * is, returns last requested ID which is now very useful information. 732 * is, returns last requested ID which is now very useful information.
733 * 733 *
734 * @usb_string_ids_n() is called from bind() callbacks to allocate 734 * @usb_string_ids_n() is called from bind() callbacks to allocate
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 166bf71fd348..e03058fe23cb 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1609,6 +1609,7 @@ static int __init m66592_probe(struct platform_device *pdev)
 	/* initialize ucd */
 	m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL);
 	if (m66592 == NULL) {
+		ret = -ENOMEM;
 		pr_err("kzalloc error\n");
 		goto clean_up;
 	}
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 70a817842755..2456ccd9965e 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1557,6 +1557,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
 	/* initialize ucd */
 	r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
 	if (r8a66597 == NULL) {
+		ret = -ENOMEM;
 		printk(KERN_ERR "kzalloc error\n");
 		goto clean_up;
 	}
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 020fa5a25fda..972d5ddd1e18 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -293,9 +293,13 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
 	/* mandatory */
 	case OID_GEN_VENDOR_DESCRIPTION:
 		pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
-		length = strlen (rndis_per_dev_params [configNr].vendorDescr);
-		memcpy (outbuf,
-			rndis_per_dev_params [configNr].vendorDescr, length);
+		if ( rndis_per_dev_params [configNr].vendorDescr ) {
+			length = strlen (rndis_per_dev_params [configNr].vendorDescr);
+			memcpy (outbuf,
+				rndis_per_dev_params [configNr].vendorDescr, length);
+		} else {
+			outbuf[0] = 0;
+		}
 		retval = 0;
 		break;
 
@@ -1148,7 +1152,7 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
 #endif	/* CONFIG_USB_GADGET_DEBUG_FILES */
 
 
-int __init rndis_init (void)
+int rndis_init(void)
 {
 	u8 i;
 
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
index c236aaa9dcd1..907c33008118 100644
--- a/drivers/usb/gadget/rndis.h
+++ b/drivers/usb/gadget/rndis.h
@@ -262,7 +262,7 @@ int rndis_signal_disconnect (int configNr);
 int  rndis_state (int configNr);
 extern void rndis_set_host_mac (int configNr, const u8 *addr);
 
-int  __devinit rndis_init (void);
+int rndis_init(void);
 void rndis_exit (void);
 
 #endif  /* _LINUX_RNDIS_H */
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 521ebed0118d..a229744a8c7d 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -12,8 +12,6 @@
  * published by the Free Software Foundation.
 */
 
-#define DEBUG
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index 2dcffdac86d2..5e807f083bc8 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -94,7 +94,7 @@ uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt)
 		break;
 	}
 
-	if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) {
+	if (i == ARRAY_SIZE(uvc_formats)) {
 		printk(KERN_INFO "Unsupported format 0x%08x.\n",
 			fmt->fmt.pix.pixelformat);
 		return -EINVAL;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 58b72d741d93..a1e8d273103f 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -119,6 +119,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
 			ehci->broken_periodic = 1;
 			ehci_info(ehci, "using broken periodic workaround\n");
 		}
+		if (pdev->device == 0x0806 || pdev->device == 0x0811
+				|| pdev->device == 0x0829) {
+			ehci_info(ehci, "disable lpm for langwell/penwell\n");
+			ehci->has_lpm = 0;
+		}
 		break;
 	case PCI_VENDOR_ID_TDI:
 		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 335ee699fd85..ba52be473027 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -192,17 +192,19 @@ ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat
 	}
 
 	rv = usb_add_hcd(hcd, irq, 0);
-	if (rv == 0)
-		return 0;
+	if (rv)
+		goto err_ehci;
+
+	return 0;
 
+err_ehci:
+	if (ehci->has_amcc_usb23)
+		iounmap(ehci->ohci_hcctrl_reg);
 	iounmap(hcd->regs);
 err_ioremap:
 	irq_dispose_mapping(irq);
 err_irq:
 	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-
-	if (ehci->has_amcc_usb23)
-		iounmap(ehci->ohci_hcctrl_reg);
 err_rmr:
 	usb_put_hcd(hcd);
 
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index d1a3dfc9a408..bdba8c5d844a 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -829,6 +829,7 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
 	 * almost immediately. With ISP1761, this register requires a delay of
 	 * 195ns between a write and subsequent read (see section 15.1.1.3).
 	 */
+	mmiowb();
 	ndelay(195);
 	skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
 
@@ -870,6 +871,7 @@ static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
 	 * almost immediately. With ISP1761, this register requires a delay of
 	 * 195ns between a write and subsequent read (see section 15.1.1.3).
 	 */
+	mmiowb();
 	ndelay(195);
 	skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG);
 
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bc3f4f427065..48e60d166ff0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -131,7 +131,7 @@ static void next_trb(struct xhci_hcd *xhci,
 		*seg = (*seg)->next;
 		*trb = ((*seg)->trbs);
 	} else {
-		*trb = (*trb)++;
+		(*trb)++;
 	}
 }
 
@@ -1551,6 +1551,10 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	/* calc actual length */
 	if (ep->skip) {
 		td->urb->iso_frame_desc[idx].actual_length = 0;
+		/* Update ring dequeue pointer */
+		while (ep_ring->dequeue != td->last_trb)
+			inc_deq(xhci, ep_ring, false);
+		inc_deq(xhci, ep_ring, false);
 		return finish_td(xhci, td, event_trb, event, ep, status, true);
 	}
 
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index d240de097c62..801324af9470 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -439,7 +439,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
 			/* drain secondary buffer */
 			int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary;
 			i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount);
-			if (i < 0) {
+			if (i) {
 				retval = -EFAULT;
 				goto exit;
 			}
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 2de49c8887c5..bc88c79875a1 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -542,7 +542,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
 			retval = io_res;
 		else {
 			io_res = copy_to_user(user_buffer, buffer, dev->report_size);
-			if (io_res < 0)
+			if (io_res)
 				retval = -EFAULT;
 		}
 		break;
@@ -574,7 +574,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
 		}
 		io_res = copy_to_user((struct iowarrior_info __user *)arg, &info,
 				      sizeof(struct iowarrior_info));
-		if (io_res < 0)
+		if (io_res)
 			retval = -EFAULT;
 		break;
 	}
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 59dc3d351b60..5ab5bb89bae3 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c,
 				index, transmit ? 'T' : 'R', cppi_ch);
 	cppi_ch->hw_ep = ep;
 	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+	cppi_ch->channel.max_len = 0x7fffffff;
 
 	DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
 	return &cppi_ch->channel;
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index c79a5e30d437..9e8639d4e862 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -195,15 +195,14 @@ static const struct file_operations musb_regdump_fops = {
 
 static int musb_test_mode_open(struct inode *inode, struct file *file)
 {
-	file->private_data = inode->i_private;
-
 	return single_open(file, musb_test_mode_show, inode->i_private);
 }
 
 static ssize_t musb_test_mode_write(struct file *file,
 		const char __user *ubuf, size_t count, loff_t *ppos)
 {
-	struct musb		*musb = file->private_data;
+	struct seq_file		*s = file->private_data;
+	struct musb		*musb = s->private;
 	u8			test = 0;
 	char			buf[18];
 
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 6fca870e957e..d065e23f123e 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req)
 #ifndef	CONFIG_MUSB_PIO_ONLY
 	if (is_dma_capable() && musb_ep->dma) {
 		struct dma_controller	*c = musb->dma_controller;
+		size_t request_size;
+
+		/* setup DMA, then program endpoint CSR */
+		request_size = min_t(size_t, request->length - request->actual,
+					musb_ep->dma->max_len);
 
 		use_dma = (request->dma != DMA_ADDR_INVALID);
 
@@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req)
 
 #ifdef CONFIG_USB_INVENTRA_DMA
 		{
-			size_t request_size;
-
-			/* setup DMA, then program endpoint CSR */
-			request_size = min_t(size_t, request->length,
-						musb_ep->dma->max_len);
 			if (request_size < musb_ep->packet_sz)
 				musb_ep->dma->desired_mode = 0;
 			else
@@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
 			use_dma = use_dma && c->channel_program(
 					musb_ep->dma, musb_ep->packet_sz,
 					0,
-					request->dma,
-					request->length);
+					request->dma + request->actual,
+					request_size);
 			if (!use_dma) {
 				c->channel_release(musb_ep->dma);
 				musb_ep->dma = NULL;
@@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
 			use_dma = use_dma && c->channel_program(
 					musb_ep->dma, musb_ep->packet_sz,
 					request->zero,
-					request->dma,
-					request->length);
+					request->dma + request->actual,
+					request_size);
 #endif
 	}
 #endif
@@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum)
 			request->zero = 0;
 		}
 
-		/* ... or if not, then complete it. */
-		musb_g_giveback(musb_ep, request, 0);
-
-		/*
-		 * Kickstart next transfer if appropriate;
-		 * the packet that just completed might not
-		 * be transmitted for hours or days.
-		 * REVISIT for double buffering...
-		 * FIXME revisit for stalls too...
-		 */
-		musb_ep_select(mbase, epnum);
-		csr = musb_readw(epio, MUSB_TXCSR);
-		if (csr & MUSB_TXCSR_FIFONOTEMPTY)
-			return;
-
-		request = musb_ep->desc ? next_request(musb_ep) : NULL;
-		if (!request) {
-			DBG(4, "%s idle now\n",
-				musb_ep->end_point.name);
-			return;
-		}
+		if (request->actual == request->length) {
+			musb_g_giveback(musb_ep, request, 0);
+			request = musb_ep->desc ? next_request(musb_ep) : NULL;
+			if (!request) {
+				DBG(4, "%s idle now\n",
+					musb_ep->end_point.name);
+				return;
+			}
+		}
 	}
 
@@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 {
 	const u8		epnum = req->epnum;
 	struct usb_request	*request = &req->request;
-	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
+	struct musb_ep		*musb_ep;
 	void __iomem		*epio = musb->endpoints[epnum].regs;
 	unsigned		fifo_count = 0;
-	u16			len = musb_ep->packet_sz;
+	u16			len;
 	u16			csr = musb_readw(epio, MUSB_RXCSR);
+	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
+
+	if (hw_ep->is_shared_fifo)
+		musb_ep = &hw_ep->ep_in;
+	else
+		musb_ep = &hw_ep->ep_out;
+
+	len = musb_ep->packet_sz;
 
 	/* We shouldn't get here while DMA is active, but we do... */
 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			 */
 
 			csr |= MUSB_RXCSR_DMAENAB;
-#ifdef USE_MODE1
 			csr |= MUSB_RXCSR_AUTOCLEAR;
+#ifdef USE_MODE1
 			/* csr |= MUSB_RXCSR_DMAMODE; */
 
 			/* this special sequence (enabling and then
@@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		if (request->actual < request->length) {
 			int transfer_size = 0;
 #ifdef USE_MODE1
-			transfer_size = min(request->length,
+			transfer_size = min(request->length - request->actual,
 					    channel->max_len);
 #else
-			transfer_size = len;
+			transfer_size = min(request->length - request->actual,
+					    (unsigned)len);
 #endif
 			if (transfer_size <= musb_ep->packet_sz)
 				musb_ep->dma->desired_mode = 0;
@@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
 	u16			csr;
 	struct usb_request	*request;
 	void __iomem		*mbase = musb->mregs;
-	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
+	struct musb_ep		*musb_ep;
 	void __iomem		*epio = musb->endpoints[epnum].regs;
 	struct dma_channel	*dma;
+	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
+
+	if (hw_ep->is_shared_fifo)
+		musb_ep = &hw_ep->ep_in;
+	else
+		musb_ep = &hw_ep->ep_out;
 
 	musb_ep_select(mbase, epnum);
 
@@ -1081,7 +1084,7 @@ struct free_record {
 /*
  * Context: controller locked, IRQs blocked.
  */
-static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+void musb_ep_restart(struct musb *musb, struct musb_request *req)
 {
 	DBG(3, "<== %s request %p len %u on hw_ep%d\n",
 		req->tx ? "TX/IN" : "RX/OUT",
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index c8b140325d82..572b1da7f2dc 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *);
 
 extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
 
+extern void musb_ep_restart(struct musb *, struct musb_request *);
+
 #endif		/* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 59bef8f3a358..6dd03f4c5f49 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -261,6 +261,7 @@ __acquires(musb->lock)
 					ctrlrequest->wIndex & 0x0f;
 				struct musb_ep		*musb_ep;
 				struct musb_hw_ep	*ep;
+				struct musb_request	*request;
 				void __iomem		*regs;
 				int			is_in;
 				u16			csr;
@@ -302,6 +303,14 @@ __acquires(musb->lock)
 					musb_writew(regs, MUSB_RXCSR, csr);
 				}
 
+				/* Maybe start the first request in the queue */
+				request = to_musb_request(
+						next_request(musb_ep));
+				if (!musb_ep->busy && request) {
+					DBG(3, "restarting the request\n");
+					musb_ep_restart(musb, request);
+				}
+
 				/* select ep0 again */
 				musb_ep_select(mbase, 0);
 				} break;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 877d20b1dff9..9e65c47cc98b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
 
 	qh->segsize = length;
 
+	/*
+	 * Ensure the data reaches to main memory before starting
+	 * DMA transfer
+	 */
+	wmb();
+
 	if (!dma->channel_program(channel, pkt_size, mode,
 			urb->transfer_dma + offset, length)) {
 		dma->channel_release(channel);
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 0e8888588d4e..0bc97698af15 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -347,11 +347,20 @@ static void twl4030_i2c_access(struct twl4030_usb *twl, int on)
 	}
 }
 
-static void twl4030_phy_power(struct twl4030_usb *twl, int on)
+static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
 {
-	u8 pwr;
+	u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+
+	if (on)
+		pwr &= ~PHY_PWR_PHYPWD;
+	else
+		pwr |= PHY_PWR_PHYPWD;
 
-	pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+	WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+}
+
+static void twl4030_phy_power(struct twl4030_usb *twl, int on)
+{
 	if (on) {
 		regulator_enable(twl->usb3v1);
 		regulator_enable(twl->usb1v8);
@@ -365,15 +374,13 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
 		twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
 							VUSB_DEDICATED2);
 		regulator_enable(twl->usb1v5);
-		pwr &= ~PHY_PWR_PHYPWD;
-		WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+		__twl4030_phy_power(twl, 1);
 		twl4030_usb_write(twl, PHY_CLK_CTRL,
 				  twl4030_usb_read(twl, PHY_CLK_CTRL) |
 				  (PHY_CLK_CTRL_CLOCKGATING_EN |
 				   PHY_CLK_CTRL_CLK32K_EN));
 	} else {
-		pwr |= PHY_PWR_PHYPWD;
-		WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+		__twl4030_phy_power(twl, 0);
 		regulator_disable(twl->usb1v5);
 		regulator_disable(twl->usb1v8);
 		regulator_disable(twl->usb3v1);
@@ -387,19 +394,25 @@ static void twl4030_phy_suspend(struct twl4030_usb *twl, int controller_off)
 
 	twl4030_phy_power(twl, 0);
 	twl->asleep = 1;
+	dev_dbg(twl->dev, "%s\n", __func__);
 }
 
-static void twl4030_phy_resume(struct twl4030_usb *twl)
+static void __twl4030_phy_resume(struct twl4030_usb *twl)
 {
-	if (!twl->asleep)
-		return;
-
 	twl4030_phy_power(twl, 1);
 	twl4030_i2c_access(twl, 1);
 	twl4030_usb_set_mode(twl, twl->usb_mode);
 	if (twl->usb_mode == T2_USB_MODE_ULPI)
 		twl4030_i2c_access(twl, 0);
+}
+
+static void twl4030_phy_resume(struct twl4030_usb *twl)
+{
+	if (!twl->asleep)
+		return;
+	__twl4030_phy_resume(twl);
 	twl->asleep = 0;
+	dev_dbg(twl->dev, "%s\n", __func__);
 }
 
 static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
@@ -408,8 +421,8 @@ static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
 	twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
 	twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
 
-	/* put VUSB3V1 LDO in active state */
-	twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);
+	/* Keep VUSB3V1 LDO in sleep state until VBUS/ID change detected*/
+	/*twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);*/
 
 	/* input to VUSB3V1 LDO is from VBAT, not VBUS */
 	twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
@@ -502,6 +515,26 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
 	return IRQ_HANDLED;
 }
 
+static void twl4030_usb_phy_init(struct twl4030_usb *twl)
+{
+	int status;
+
+	status = twl4030_usb_linkstat(twl);
+	if (status >= 0) {
+		if (status == USB_EVENT_NONE) {
+			__twl4030_phy_power(twl, 0);
+			twl->asleep = 1;
+		} else {
+			__twl4030_phy_resume(twl);
+			twl->asleep = 0;
+		}
+
+		blocking_notifier_call_chain(&twl->otg.notifier, status,
+				twl->otg.gadget);
+	}
+	sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+}
+
 static int twl4030_set_suspend(struct otg_transceiver *x, int suspend)
 {
 	struct twl4030_usb *twl = xceiv_to_twl(x);
@@ -568,7 +601,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
 	twl->otg.set_peripheral	= twl4030_set_peripheral;
 	twl->otg.set_suspend	= twl4030_set_suspend;
 	twl->usb_mode		= pdata->usb_mode;
-	twl->asleep = 1;
+	twl->asleep		= 1;
 
 	/* init spinlock for workqueue */
 	spin_lock_init(&twl->lock);
@@ -606,15 +639,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
 		return status;
 	}
 
-	/* The IRQ handler just handles changes from the previous states
-	 * of the ID and VBUS pins ... in probe() we must initialize that
-	 * previous state.  The easy way:  fake an IRQ.
-	 *
-	 * REVISIT:  a real IRQ might have happened already, if PREEMPT is
-	 * enabled.  Else the IRQ may not yet be configured or enabled,
-	 * because of scheduling delays.
+	/* Power down phy or make it work according to
+	 * current link state.
 	 */
-	twl4030_usb_irq(twl->irq, twl);
+	twl4030_usb_phy_init(twl);
 
 	dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
 	return 0;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 2bef4415c19c..4f1744c5871f 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -56,6 +56,7 @@ static int debug;
 static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
 	{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+	{ USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
 	{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
 	{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
 	{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
@@ -88,6 +89,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
 	{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
 	{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
+	{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
 	{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
 	{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
 	{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
@@ -109,6 +111,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
 	{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
@@ -122,14 +125,14 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
 	{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
 	{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
-	{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
-	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
-	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
-	{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
 	{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
 	{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
 	{ USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
 	{ USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
+	{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+	{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
 	{ } /* Terminating Entry */
 };
 
@@ -222,8 +225,8 @@ static struct usb_serial_driver cp210x_device = {
 #define BITS_STOP_2		0x0002
 
 /* CP210X_SET_BREAK */
-#define BREAK_ON		0x0000
-#define BREAK_OFF		0x0001
+#define BREAK_ON		0x0001
+#define BREAK_OFF		0x0000
 
 /* CP210X_(SET_MHS|GET_MDMSTS) */
 #define CONTROL_DTR		0x0001
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index eb12d9b096b4..97cc87d654ce 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -180,6 +180,7 @@ static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
 	{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+	{ USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
@@ -750,6 +751,16 @@ static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
+	{ USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
+		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
 	{ },					/* Optional parameter entry */
 	{ }					/* Terminating entry */
 };
@@ -1376,7 +1387,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
 	}
 
 	/* set max packet size based on descriptor */
-	priv->max_packet_size = ep_desc->wMaxPacketSize;
+	priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);
 
 	dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
 }
@@ -1831,7 +1842,7 @@ static int ftdi_process_packet(struct tty_struct *tty,
 
 	if (port->port.console && port->sysrq) {
 		for (i = 0; i < len; i++, ch++) {
-			if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+			if (!usb_serial_handle_sysrq_char(port, *ch))
 				tty_insert_flip_char(tty, *ch, flag);
 		}
 	} else {
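
The wMaxPacketSize change above is an endianness fix: USB descriptor fields are little-endian on the wire and struct usb_endpoint_descriptor stores them as __le16, so using the raw value gives a byte-swapped packet size on big-endian hosts. A small illustrative sketch of reading the field correctly (hypothetical helper, not part of the patch):

#include <linux/usb/ch9.h>

/* Convert the little-endian descriptor field to CPU byte order
 * before using it as a packet size. */
static unsigned int example_max_packet(const struct usb_endpoint_descriptor *ep)
{
	return le16_to_cpu(ep->wMaxPacketSize);
}
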
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 6e612c52e763..15a4583775ad 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -110,6 +110,9 @@
 /* Propox devices */
 #define FTDI_PROPOX_JTAGCABLEII_PID	0xD738
 
+/* Lenz LI-USB Computer Interface. */
+#define FTDI_LENZ_LIUSB_PID		0xD780
+
 /*
  * Xsens Technologies BV products (http://www.xsens.com).
  */
@@ -132,6 +135,18 @@
 #define FTDI_NDI_AURORA_SCU_PID		0xDA74	/* NDI Aurora SCU */
 
 /*
+ * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
+ */
+#define FTDI_CHAMSYS_24_MASTER_WING_PID	0xDAF8
+#define FTDI_CHAMSYS_PC_WING_PID	0xDAF9
+#define FTDI_CHAMSYS_USB_DMX_PID	0xDAFA
+#define FTDI_CHAMSYS_MIDI_TIMECODE_PID	0xDAFB
+#define FTDI_CHAMSYS_MINI_WING_PID	0xDAFC
+#define FTDI_CHAMSYS_MAXI_WING_PID	0xDAFD
+#define FTDI_CHAMSYS_MEDIA_WING_PID	0xDAFE
+#define FTDI_CHAMSYS_WING_PID		0xDAFF
+
+/*
  * Westrex International devices submitted by Cory Lee
  */
 #define FTDI_WESTREX_MODEL_777_PID	0xDC00	/* Model 777 */
@@ -989,6 +1004,12 @@
 #define ALTI2_N3_PID	0x6001	/* Neptune 3 */
 
 /*
+ * Ionics PlugComputer
+ */
+#define IONICS_VID			0x1c0c
+#define IONICS_PLUGCOMPUTER_PID		0x0102
+
+/*
  * Dresden Elektronik Sensor Terminal Board
  */
 #define DE_VID			0x1cf1 /* Vendor ID */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index ca92f67747cc..e6833e216fc9 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -343,7 +343,7 @@ void usb_serial_generic_process_read_urb(struct urb *urb)
 		tty_insert_flip_string(tty, ch, urb->actual_length);
 	else {
 		for (i = 0; i < urb->actual_length; i++, ch++) {
-			if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+			if (!usb_serial_handle_sysrq_char(port, *ch))
 				tty_insert_flip_char(tty, *ch, TTY_NORMAL);
 		}
 	}
@@ -448,12 +448,11 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
 EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
 
 #ifdef CONFIG_MAGIC_SYSRQ
-int usb_serial_handle_sysrq_char(struct tty_struct *tty,
-			struct usb_serial_port *port, unsigned int ch)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
 {
 	if (port->sysrq && port->port.console) {
 		if (ch && time_before(jiffies, port->sysrq)) {
-			handle_sysrq(ch, tty);
+			handle_sysrq(ch);
 			port->sysrq = 0;
 			return 1;
 		}
@@ -462,8 +461,7 @@ int usb_serial_handle_sysrq_char(struct tty_struct *tty,
 	return 0;
 }
 #else
-int usb_serial_handle_sysrq_char(struct tty_struct *tty,
-			struct usb_serial_port *port, unsigned int ch)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
 {
 	return 0;
 }
@@ -518,6 +516,7 @@ void usb_serial_generic_disconnect(struct usb_serial *serial)
 	for (i = 0; i < serial->num_ports; ++i)
 		generic_cleanup(serial->port[i]);
 }
+EXPORT_SYMBOL_GPL(usb_serial_generic_disconnect);
 
 void usb_serial_generic_release(struct usb_serial *serial)
 {
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index dc47f986df57..a7cfc5952937 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1151,7 +1151,7 @@ static int download_fw(struct edgeport_serial *serial)
 
 	/* Check if we have an old version in the I2C and
 	   update if necessary */
-	if (download_cur_ver != download_new_ver) {
+	if (download_cur_ver < download_new_ver) {
 		dbg("%s - Update I2C dld from %d.%d to %d.%d",
 		    __func__,
 		    firmware_version->Ver_Major,
@@ -1284,7 +1284,7 @@ static int download_fw(struct edgeport_serial *serial)
 			kfree(header);
 			kfree(rom_desc);
 			kfree(ti_manuf_desc);
-			return status;
+			return -EINVAL;
 		}
 
 		/* Update I2C with type 0xf2 record with correct
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 30922a7e3347..aa665817a272 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -2024,6 +2024,9 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file,
 
 	case TIOCGICOUNT:
 		cnow = mos7720_port->icount;
+
+		memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
 		icount.cts = cnow.cts;
 		icount.dsr = cnow.dsr;
 		icount.rng = cnow.rng;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 585b7e663740..1a42bc213799 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -119,16 +119,20 @@
  * by making a change here, in moschip_port_id_table, and in
  * moschip_id_table_combined
  */
-#define USB_VENDOR_ID_BANDB		0x0856
-#define BANDB_DEVICE_ID_USO9ML2_2	0xAC22
-#define BANDB_DEVICE_ID_USO9ML2_4	0xAC24
-#define BANDB_DEVICE_ID_US9ML2_2	0xAC29
-#define BANDB_DEVICE_ID_US9ML2_4	0xAC30
-#define BANDB_DEVICE_ID_USPTL4_2	0xAC31
-#define BANDB_DEVICE_ID_USPTL4_4	0xAC32
-#define BANDB_DEVICE_ID_USOPTL4_2	0xAC42
-#define BANDB_DEVICE_ID_USOPTL4_4	0xAC44
-#define BANDB_DEVICE_ID_USOPTL2_4	0xAC24
+#define USB_VENDOR_ID_BANDB		 0x0856
+#define BANDB_DEVICE_ID_USO9ML2_2	 0xAC22
+#define BANDB_DEVICE_ID_USO9ML2_2P	 0xBC00
+#define BANDB_DEVICE_ID_USO9ML2_4	 0xAC24
+#define BANDB_DEVICE_ID_USO9ML2_4P	 0xBC01
+#define BANDB_DEVICE_ID_US9ML2_2	 0xAC29
+#define BANDB_DEVICE_ID_US9ML2_4	 0xAC30
+#define BANDB_DEVICE_ID_USPTL4_2	 0xAC31
+#define BANDB_DEVICE_ID_USPTL4_4	 0xAC32
+#define BANDB_DEVICE_ID_USOPTL4_2	 0xAC42
+#define BANDB_DEVICE_ID_USOPTL4_2P	 0xBC02
+#define BANDB_DEVICE_ID_USOPTL4_4	 0xAC44
+#define BANDB_DEVICE_ID_USOPTL4_4P	 0xBC03
+#define BANDB_DEVICE_ID_USOPTL2_4	 0xAC24
 
 /* This driver also supports
  * ATEN UC2324 device using Moschip MCS7840
@@ -184,13 +188,17 @@ static const struct usb_device_id moschip_port_id_table[] = {
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
 	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
 	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
@@ -201,13 +209,17 @@ static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
 	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
 	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
 	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
@@ -2273,6 +2285,9 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file,
 	case TIOCGICOUNT:
 		cnow = mos7840_port->icount;
 		smp_rmb();
+
+		memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
 		icount.cts = cnow.cts;
 		icount.dsr = cnow.dsr;
 		icount.rng = cnow.rng;
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index a6b207c84917..1f00f243c26c 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -25,6 +25,7 @@ static int debug;
 
 static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x0a99, 0x0001) },	/* Talon Technology device */
+	{ USB_DEVICE(0x0df7, 0x0900) },	/* Mobile Action i-gotU */
 	{ },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 9fc6ea2c681f..c46911af282f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -164,6 +164,14 @@ static void option_instat_callback(struct urb *urb);
 #define YISO_VENDOR_ID				0x0EAB
 #define YISO_PRODUCT_U893			0xC893
 
+/*
+ * NOVATEL WIRELESS PRODUCTS
+ *
+ * Note from Novatel Wireless:
+ * If your Novatel modem does not work on linux, don't
+ * change the option module, but check our website. If
+ * that does not help, contact ddeschepper@nvtl.com
+*/
 /* MERLIN EVDO PRODUCTS */
 #define NOVATELWIRELESS_PRODUCT_V640		0x1100
 #define NOVATELWIRELESS_PRODUCT_V620		0x1110
@@ -185,24 +193,39 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_EU730		0x2400
 #define NOVATELWIRELESS_PRODUCT_EU740		0x2410
 #define NOVATELWIRELESS_PRODUCT_EU870D		0x2420
-
 /* OVATION PRODUCTS */
 #define NOVATELWIRELESS_PRODUCT_MC727		0x4100
 #define NOVATELWIRELESS_PRODUCT_MC950D		0x4400
-#define NOVATELWIRELESS_PRODUCT_U727		0x5010
-#define NOVATELWIRELESS_PRODUCT_MC727_NEW	0x5100
-#define NOVATELWIRELESS_PRODUCT_MC760		0x6000
+/*
+ * Note from Novatel Wireless:
+ * All PID in the 5xxx range are currently reserved for
+ * auto-install CDROMs, and should not be added to this
+ * module.
+ *
+ * #define NOVATELWIRELESS_PRODUCT_U727		0x5010
+ * #define NOVATELWIRELESS_PRODUCT_MC727_NEW	0x5100
+*/
 #define NOVATELWIRELESS_PRODUCT_OVMC760		0x6002
-
-/* FUTURE NOVATEL PRODUCTS */
-#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED	0X6001
-#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED	0X7000
-#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED	0X7001
-#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED	0X8000
-#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED	0X8001
-#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED	0X9000
-#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED	0X9001
-#define NOVATELWIRELESS_PRODUCT_GLOBAL		0XA001
+#define NOVATELWIRELESS_PRODUCT_MC780		0x6010
+#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED	0x6000
+#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED	0x6001
+#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED	0x7000
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED	0x7001
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3	0x7003
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4	0x7004
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5	0x7005
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6	0x7006
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7	0x7007
+#define NOVATELWIRELESS_PRODUCT_MC996D		0x7030
+#define NOVATELWIRELESS_PRODUCT_MF3470		0x7041
+#define NOVATELWIRELESS_PRODUCT_MC547		0x7042
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED	0x8000
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED	0x8001
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED	0x9000
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED	0x9001
+#define NOVATELWIRELESS_PRODUCT_G1		0xA001
+#define NOVATELWIRELESS_PRODUCT_G1_M		0xA002
+#define NOVATELWIRELESS_PRODUCT_G2		0xA010
 
 /* AMOI PRODUCTS */
 #define AMOI_VENDOR_ID				0x1614
@@ -365,6 +388,10 @@ static void option_instat_callback(struct urb *urb);
 #define OLIVETTI_VENDOR_ID			0x0b3c
 #define OLIVETTI_PRODUCT_OLICARD100		0xc000
 
+/* Celot products */
+#define CELOT_VENDOR_ID				0x211f
+#define CELOT_PRODUCT_CT680M			0x6801
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
 	OPTION_BLACKLIST_NONE = 0,
@@ -486,36 +513,44 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */
-	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MF3470) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
553 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
519 554
520 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, 555 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
521 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, 556 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -887,10 +922,9 @@ static const struct usb_device_id option_ids[] = {
887 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, 922 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
888 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, 923 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
889 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, 924 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
890
891 { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, 925 { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
892
893 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, 926 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
927 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
894 { } /* Terminating entry */ 928 { } /* Terminating entry */
895}; 929};
896MODULE_DEVICE_TABLE(usb, option_ids); 930MODULE_DEVICE_TABLE(usb, option_ids);
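
Note (not part of the patch): the option.c hunks above drop the per-model comments from the Novatel block and add a batch of new IDs (MC780, MC996D, MF3470, MC547, G1/G1_M/G2 plus extra full- and high-speed EVDO/HSPA variants) and the CELOT CT680M entry. New modems are normally added the same way; the sketch below is illustrative only and uses made-up vendor/product IDs, not values from the patch.

	#include <linux/module.h>
	#include <linux/usb.h>

	/* Hypothetical IDs for illustration; not from the patch. */
	#define EXAMPLE_VENDOR_ID	0x1234
	#define EXAMPLE_PRODUCT_M100	0x5678

	static const struct usb_device_id example_ids[] = {
		/* match on VID/PID alone ... */
		{ USB_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_M100) },
		/* ... or only on vendor-specific (0xff) interfaces, as the
		 * Huawei entries above do, so that e.g. a storage interface
		 * on the same device is not claimed by the serial driver */
		{ USB_DEVICE_AND_INTERFACE_INFO(EXAMPLE_VENDOR_ID,
						EXAMPLE_PRODUCT_M100,
						0xff, 0xff, 0xff) },
		{ }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, example_ids);
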
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 6b6001822279..8ae4c6cbc38a 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = {
86 { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, 86 { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
87 { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, 87 { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
88 { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, 88 { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
89 { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
89 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, 90 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
90 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, 91 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
91 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, 92 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
@@ -788,7 +789,7 @@ static void pl2303_process_read_urb(struct urb *urb)
788 789
789 if (port->port.console && port->sysrq) { 790 if (port->port.console && port->sysrq) {
790 for (i = 0; i < urb->actual_length; ++i) 791 for (i = 0; i < urb->actual_length; ++i)
791 if (!usb_serial_handle_sysrq_char(tty, port, data[i])) 792 if (!usb_serial_handle_sysrq_char(port, data[i]))
792 tty_insert_flip_char(tty, data[i], tty_flag); 793 tty_insert_flip_char(tty, data[i], tty_flag);
793 } else { 794 } else {
794 tty_insert_flip_string_fixed_flag(tty, data, tty_flag, 795 tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index a871645389dd..43eb9bdad422 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -128,6 +128,10 @@
128#define CRESSI_VENDOR_ID 0x04b8 128#define CRESSI_VENDOR_ID 0x04b8
129#define CRESSI_EDY_PRODUCT_ID 0x0521 129#define CRESSI_EDY_PRODUCT_ID 0x0521
130 130
131/* Zeagle dive computer interface */
132#define ZEAGLE_VENDOR_ID 0x04b8
133#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522
134
131/* Sony, USB data cable for CMD-Jxx mobile phones */ 135/* Sony, USB data cable for CMD-Jxx mobile phones */
132#define SONY_VENDOR_ID 0x054c 136#define SONY_VENDOR_ID 0x054c
133#define SONY_QN3USB_PRODUCT_ID 0x0437 137#define SONY_QN3USB_PRODUCT_ID 0x0437
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 6e82d4f54bc8..e986002b3844 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -15,6 +15,7 @@
15#include <linux/serial.h> 15#include <linux/serial.h>
16#include <linux/usb.h> 16#include <linux/usb.h>
17#include <linux/usb/serial.h> 17#include <linux/usb/serial.h>
18#include <linux/serial_reg.h>
18#include <linux/uaccess.h> 19#include <linux/uaccess.h>
19 20
20#define QT_OPEN_CLOSE_CHANNEL 0xca 21#define QT_OPEN_CLOSE_CHANNEL 0xca
@@ -27,36 +28,11 @@
27#define QT_HW_FLOW_CONTROL_MASK 0xc5 28#define QT_HW_FLOW_CONTROL_MASK 0xc5
28#define QT_SW_FLOW_CONTROL_MASK 0xc6 29#define QT_SW_FLOW_CONTROL_MASK 0xc6
29 30
30#define MODEM_CTL_REGISTER 0x04
31#define MODEM_STATUS_REGISTER 0x06
32
33
34#define SERIAL_LSR_OE 0x02
35#define SERIAL_LSR_PE 0x04
36#define SERIAL_LSR_FE 0x08
37#define SERIAL_LSR_BI 0x10
38
39#define SERIAL_LSR_TEMT 0x40
40
41#define SERIAL_MCR_DTR 0x01
42#define SERIAL_MCR_RTS 0x02
43#define SERIAL_MCR_LOOP 0x10
44
45#define SERIAL_MSR_CTS 0x10
46#define SERIAL_MSR_CD 0x80
47#define SERIAL_MSR_RI 0x40
48#define SERIAL_MSR_DSR 0x20
49#define SERIAL_MSR_MASK 0xf0 31#define SERIAL_MSR_MASK 0xf0
50 32
51#define SERIAL_CRTSCTS ((SERIAL_MCR_RTS << 8) | SERIAL_MSR_CTS) 33#define SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS)
52 34
53#define SERIAL_8_DATA 0x03 35#define SERIAL_EVEN_PARITY (UART_LCR_PARITY | UART_LCR_EPAR)
54#define SERIAL_7_DATA 0x02
55#define SERIAL_6_DATA 0x01
56#define SERIAL_5_DATA 0x00
57
58#define SERIAL_ODD_PARITY 0X08
59#define SERIAL_EVEN_PARITY 0X18
60 36
61#define MAX_BAUD_RATE 460800 37#define MAX_BAUD_RATE 460800
62 38
@@ -70,7 +46,7 @@
70#define FULLPWRBIT 0x00000080 46#define FULLPWRBIT 0x00000080
71#define NEXT_BOARD_POWER_BIT 0x00000004 47#define NEXT_BOARD_POWER_BIT 0x00000004
72 48
73static int debug = 1; 49static int debug;
74 50
75/* Version Information */ 51/* Version Information */
76#define DRIVER_VERSION "v0.1" 52#define DRIVER_VERSION "v0.1"
@@ -99,10 +75,12 @@ static struct usb_driver ssu100_driver = {
99}; 75};
100 76
101struct ssu100_port_private { 77struct ssu100_port_private {
78 spinlock_t status_lock;
102 u8 shadowLSR; 79 u8 shadowLSR;
103 u8 shadowMSR; 80 u8 shadowMSR;
104 wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ 81 wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
105 unsigned short max_packet_size; 82 unsigned short max_packet_size;
83 struct async_icount icount;
106}; 84};
107 85
108static void ssu100_release(struct usb_serial *serial) 86static void ssu100_release(struct usb_serial *serial)
@@ -150,9 +128,10 @@ static inline int ssu100_getregister(struct usb_device *dev,
150 128
151static inline int ssu100_setregister(struct usb_device *dev, 129static inline int ssu100_setregister(struct usb_device *dev,
152 unsigned short uart, 130 unsigned short uart,
131 unsigned short reg,
153 u16 data) 132 u16 data)
154{ 133{
155 u16 value = (data << 8) | MODEM_CTL_REGISTER; 134 u16 value = (data << 8) | reg;
156 135
157 return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 136 return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
158 QT_SET_GET_REGISTER, 0x40, value, uart, 137 QT_SET_GET_REGISTER, 0x40, value, uart,
@@ -178,11 +157,11 @@ static inline int update_mctrl(struct usb_device *dev, unsigned int set,
178 clear &= ~set; /* 'set' takes precedence over 'clear' */ 157 clear &= ~set; /* 'set' takes precedence over 'clear' */
179 urb_value = 0; 158 urb_value = 0;
180 if (set & TIOCM_DTR) 159 if (set & TIOCM_DTR)
181 urb_value |= SERIAL_MCR_DTR; 160 urb_value |= UART_MCR_DTR;
182 if (set & TIOCM_RTS) 161 if (set & TIOCM_RTS)
183 urb_value |= SERIAL_MCR_RTS; 162 urb_value |= UART_MCR_RTS;
184 163
185 result = ssu100_setregister(dev, 0, urb_value); 164 result = ssu100_setregister(dev, 0, UART_MCR, urb_value);
186 if (result < 0) 165 if (result < 0)
187 dbg("%s Error from MODEM_CTRL urb", __func__); 166 dbg("%s Error from MODEM_CTRL urb", __func__);
188 167
@@ -264,24 +243,24 @@ static void ssu100_set_termios(struct tty_struct *tty,
264 243
265 if (cflag & PARENB) { 244 if (cflag & PARENB) {
266 if (cflag & PARODD) 245 if (cflag & PARODD)
267 urb_value |= SERIAL_ODD_PARITY; 246 urb_value |= UART_LCR_PARITY;
268 else 247 else
269 urb_value |= SERIAL_EVEN_PARITY; 248 urb_value |= SERIAL_EVEN_PARITY;
270 } 249 }
271 250
272 switch (cflag & CSIZE) { 251 switch (cflag & CSIZE) {
273 case CS5: 252 case CS5:
274 urb_value |= SERIAL_5_DATA; 253 urb_value |= UART_LCR_WLEN5;
275 break; 254 break;
276 case CS6: 255 case CS6:
277 urb_value |= SERIAL_6_DATA; 256 urb_value |= UART_LCR_WLEN6;
278 break; 257 break;
279 case CS7: 258 case CS7:
280 urb_value |= SERIAL_7_DATA; 259 urb_value |= UART_LCR_WLEN7;
281 break; 260 break;
282 default: 261 default:
283 case CS8: 262 case CS8:
284 urb_value |= SERIAL_8_DATA; 263 urb_value |= UART_LCR_WLEN8;
285 break; 264 break;
286 } 265 }
287 266
@@ -333,6 +312,7 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
333 struct ssu100_port_private *priv = usb_get_serial_port_data(port); 312 struct ssu100_port_private *priv = usb_get_serial_port_data(port);
334 u8 *data; 313 u8 *data;
335 int result; 314 int result;
315 unsigned long flags;
336 316
337 dbg("%s - port %d", __func__, port->number); 317 dbg("%s - port %d", __func__, port->number);
338 318
@@ -350,11 +330,10 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
350 return result; 330 return result;
351 } 331 }
352 332
353 priv->shadowLSR = data[0] & (SERIAL_LSR_OE | SERIAL_LSR_PE | 333 spin_lock_irqsave(&priv->status_lock, flags);
354 SERIAL_LSR_FE | SERIAL_LSR_BI); 334 priv->shadowLSR = data[0];
355 335 priv->shadowMSR = data[1];
356 priv->shadowMSR = data[1] & (SERIAL_MSR_CTS | SERIAL_MSR_DSR | 336 spin_unlock_irqrestore(&priv->status_lock, flags);
357 SERIAL_MSR_RI | SERIAL_MSR_CD);
358 337
359 kfree(data); 338 kfree(data);
360 339
@@ -398,11 +377,51 @@ static int get_serial_info(struct usb_serial_port *port,
398 return 0; 377 return 0;
399} 378}
400 379
380static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
381{
382 struct ssu100_port_private *priv = usb_get_serial_port_data(port);
383 struct async_icount prev, cur;
384 unsigned long flags;
385
386 spin_lock_irqsave(&priv->status_lock, flags);
387 prev = priv->icount;
388 spin_unlock_irqrestore(&priv->status_lock, flags);
389
390 while (1) {
391 wait_event_interruptible(priv->delta_msr_wait,
392 ((priv->icount.rng != prev.rng) ||
393 (priv->icount.dsr != prev.dsr) ||
394 (priv->icount.dcd != prev.dcd) ||
395 (priv->icount.cts != prev.cts)));
396
397 if (signal_pending(current))
398 return -ERESTARTSYS;
399
400 spin_lock_irqsave(&priv->status_lock, flags);
401 cur = priv->icount;
402 spin_unlock_irqrestore(&priv->status_lock, flags);
403
404 if ((prev.rng == cur.rng) &&
405 (prev.dsr == cur.dsr) &&
406 (prev.dcd == cur.dcd) &&
407 (prev.cts == cur.cts))
408 return -EIO;
409
410 if ((arg & TIOCM_RNG && (prev.rng != cur.rng)) ||
411 (arg & TIOCM_DSR && (prev.dsr != cur.dsr)) ||
412 (arg & TIOCM_CD && (prev.dcd != cur.dcd)) ||
413 (arg & TIOCM_CTS && (prev.cts != cur.cts)))
414 return 0;
415 }
416 return 0;
417}
418
401static int ssu100_ioctl(struct tty_struct *tty, struct file *file, 419static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
402 unsigned int cmd, unsigned long arg) 420 unsigned int cmd, unsigned long arg)
403{ 421{
404 struct usb_serial_port *port = tty->driver_data; 422 struct usb_serial_port *port = tty->driver_data;
405 struct ssu100_port_private *priv = usb_get_serial_port_data(port); 423 struct ssu100_port_private *priv = usb_get_serial_port_data(port);
424 void __user *user_arg = (void __user *)arg;
406 425
407 dbg("%s cmd 0x%04x", __func__, cmd); 426 dbg("%s cmd 0x%04x", __func__, cmd);
408 427
@@ -412,28 +431,28 @@ static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
412 (struct serial_struct __user *) arg); 431 (struct serial_struct __user *) arg);
413 432
414 case TIOCMIWAIT: 433 case TIOCMIWAIT:
415 while (priv != NULL) { 434 return wait_modem_info(port, arg);
416 u8 prevMSR = priv->shadowMSR & SERIAL_MSR_MASK; 435
417 interruptible_sleep_on(&priv->delta_msr_wait); 436 case TIOCGICOUNT:
418 /* see if a signal did it */ 437 {
419 if (signal_pending(current)) 438 struct serial_icounter_struct icount;
420 return -ERESTARTSYS; 439 struct async_icount cnow = priv->icount;
421 else { 440 memset(&icount, 0, sizeof(icount));
422 u8 diff = (priv->shadowMSR & SERIAL_MSR_MASK) ^ prevMSR; 441 icount.cts = cnow.cts;
423 if (!diff) 442 icount.dsr = cnow.dsr;
424 return -EIO; /* no change => error */ 443 icount.rng = cnow.rng;
425 444 icount.dcd = cnow.dcd;
426 /* Return 0 if caller wanted to know about 445 icount.rx = cnow.rx;
427 these bits */ 446 icount.tx = cnow.tx;
428 447 icount.frame = cnow.frame;
429 if (((arg & TIOCM_RNG) && (diff & SERIAL_MSR_RI)) || 448 icount.overrun = cnow.overrun;
430 ((arg & TIOCM_DSR) && (diff & SERIAL_MSR_DSR)) || 449 icount.parity = cnow.parity;
431 ((arg & TIOCM_CD) && (diff & SERIAL_MSR_CD)) || 450 icount.brk = cnow.brk;
432 ((arg & TIOCM_CTS) && (diff & SERIAL_MSR_CTS))) 451 icount.buf_overrun = cnow.buf_overrun;
433 return 0; 452 if (copy_to_user(user_arg, &icount, sizeof(icount)))
434 } 453 return -EFAULT;
435 }
436 return 0; 454 return 0;
455 }
437 456
438 default: 457 default:
439 break; 458 break;
@@ -455,6 +474,7 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port)
455 474
456 unsigned num_endpoints; 475 unsigned num_endpoints;
457 int i; 476 int i;
477 unsigned long flags;
458 478
459 num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; 479 num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
460 dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints); 480 dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
@@ -466,7 +486,9 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port)
466 } 486 }
467 487
468 /* set max packet size based on descriptor */ 488 /* set max packet size based on descriptor */
489 spin_lock_irqsave(&priv->status_lock, flags);
469 priv->max_packet_size = ep_desc->wMaxPacketSize; 490 priv->max_packet_size = ep_desc->wMaxPacketSize;
491 spin_unlock_irqrestore(&priv->status_lock, flags);
470 492
471 dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size); 493 dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
472} 494}
@@ -485,9 +507,9 @@ static int ssu100_attach(struct usb_serial *serial)
485 return -ENOMEM; 507 return -ENOMEM;
486 } 508 }
487 509
510 spin_lock_init(&priv->status_lock);
488 init_waitqueue_head(&priv->delta_msr_wait); 511 init_waitqueue_head(&priv->delta_msr_wait);
489 usb_set_serial_port_data(port, priv); 512 usb_set_serial_port_data(port, priv);
490
491 ssu100_set_max_packet_size(port); 513 ssu100_set_max_packet_size(port);
492 514
493 return ssu100_initdevice(serial->dev); 515 return ssu100_initdevice(serial->dev);
@@ -506,20 +528,20 @@ static int ssu100_tiocmget(struct tty_struct *tty, struct file *file)
506 if (!d) 528 if (!d)
507 return -ENOMEM; 529 return -ENOMEM;
508 530
509 r = ssu100_getregister(dev, 0, MODEM_CTL_REGISTER, d); 531 r = ssu100_getregister(dev, 0, UART_MCR, d);
510 if (r < 0) 532 if (r < 0)
511 goto mget_out; 533 goto mget_out;
512 534
513 r = ssu100_getregister(dev, 0, MODEM_STATUS_REGISTER, d+1); 535 r = ssu100_getregister(dev, 0, UART_MSR, d+1);
514 if (r < 0) 536 if (r < 0)
515 goto mget_out; 537 goto mget_out;
516 538
517 r = (d[0] & SERIAL_MCR_DTR ? TIOCM_DTR : 0) | 539 r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) |
518 (d[0] & SERIAL_MCR_RTS ? TIOCM_RTS : 0) | 540 (d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) |
519 (d[1] & SERIAL_MSR_CTS ? TIOCM_CTS : 0) | 541 (d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) |
520 (d[1] & SERIAL_MSR_CD ? TIOCM_CAR : 0) | 542 (d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) |
521 (d[1] & SERIAL_MSR_RI ? TIOCM_RI : 0) | 543 (d[1] & UART_MSR_RI ? TIOCM_RI : 0) |
522 (d[1] & SERIAL_MSR_DSR ? TIOCM_DSR : 0); 544 (d[1] & UART_MSR_DSR ? TIOCM_DSR : 0);
523 545
524mget_out: 546mget_out:
525 kfree(d); 547 kfree(d);
@@ -546,7 +568,7 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
546 if (!port->serial->disconnected) { 568 if (!port->serial->disconnected) {
547 /* Disable flow control */ 569 /* Disable flow control */
548 if (!on && 570 if (!on &&
549 ssu100_setregister(dev, 0, 0) < 0) 571 ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
550 dev_err(&port->dev, "error from flowcontrol urb\n"); 572 dev_err(&port->dev, "error from flowcontrol urb\n");
551 /* drop RTS and DTR */ 573 /* drop RTS and DTR */
552 if (on) 574 if (on)
@@ -557,34 +579,88 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
557 mutex_unlock(&port->serial->disc_mutex); 579 mutex_unlock(&port->serial->disc_mutex);
558} 580}
559 581
582static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
583{
584 struct ssu100_port_private *priv = usb_get_serial_port_data(port);
585 unsigned long flags;
586
587 spin_lock_irqsave(&priv->status_lock, flags);
588 priv->shadowMSR = msr;
589 spin_unlock_irqrestore(&priv->status_lock, flags);
590
591 if (msr & UART_MSR_ANY_DELTA) {
592 /* update input line counters */
593 if (msr & UART_MSR_DCTS)
594 priv->icount.cts++;
595 if (msr & UART_MSR_DDSR)
596 priv->icount.dsr++;
597 if (msr & UART_MSR_DDCD)
598 priv->icount.dcd++;
599 if (msr & UART_MSR_TERI)
600 priv->icount.rng++;
601 wake_up_interruptible(&priv->delta_msr_wait);
602 }
603}
604
605static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
606 char *tty_flag)
607{
608 struct ssu100_port_private *priv = usb_get_serial_port_data(port);
609 unsigned long flags;
610
611 spin_lock_irqsave(&priv->status_lock, flags);
612 priv->shadowLSR = lsr;
613 spin_unlock_irqrestore(&priv->status_lock, flags);
614
615 *tty_flag = TTY_NORMAL;
616 if (lsr & UART_LSR_BRK_ERROR_BITS) {
617 /* we always want to update icount, but we only want to
618 * update tty_flag for one case */
619 if (lsr & UART_LSR_BI) {
620 priv->icount.brk++;
621 *tty_flag = TTY_BREAK;
622 usb_serial_handle_break(port);
623 }
624 if (lsr & UART_LSR_PE) {
625 priv->icount.parity++;
626 if (*tty_flag == TTY_NORMAL)
627 *tty_flag = TTY_PARITY;
628 }
629 if (lsr & UART_LSR_FE) {
630 priv->icount.frame++;
631 if (*tty_flag == TTY_NORMAL)
632 *tty_flag = TTY_FRAME;
633 }
634 if (lsr & UART_LSR_OE){
635 priv->icount.overrun++;
636 if (*tty_flag == TTY_NORMAL)
637 *tty_flag = TTY_OVERRUN;
638 }
639 }
640
641}
642
560static int ssu100_process_packet(struct tty_struct *tty, 643static int ssu100_process_packet(struct tty_struct *tty,
561 struct usb_serial_port *port, 644 struct usb_serial_port *port,
562 struct ssu100_port_private *priv, 645 struct ssu100_port_private *priv,
563 char *packet, int len) 646 char *packet, int len)
564{ 647{
565 int i; 648 int i;
566 char flag; 649 char flag = TTY_NORMAL;
567 char *ch; 650 char *ch;
568 651
569 dbg("%s - port %d", __func__, port->number); 652 dbg("%s - port %d", __func__, port->number);
570 653
571 if (len < 4) { 654 if ((len >= 4) &&
572 dbg("%s - malformed packet", __func__); 655 (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
573 return 0;
574 }
575
576 if ((packet[0] == 0x1b) && (packet[1] == 0x1b) &&
577 ((packet[2] == 0x00) || (packet[2] == 0x01))) { 656 ((packet[2] == 0x00) || (packet[2] == 0x01))) {
578 if (packet[2] == 0x00) 657 if (packet[2] == 0x00) {
579 priv->shadowLSR = packet[3] & (SERIAL_LSR_OE | 658 ssu100_update_lsr(port, packet[3], &flag);
580 SERIAL_LSR_PE | 659 if (flag == TTY_OVERRUN)
581 SERIAL_LSR_FE | 660 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
582 SERIAL_LSR_BI);
583
584 if (packet[2] == 0x01) {
585 priv->shadowMSR = packet[3];
586 wake_up_interruptible(&priv->delta_msr_wait);
587 } 661 }
662 if (packet[2] == 0x01)
663 ssu100_update_msr(port, packet[3]);
588 664
589 len -= 4; 665 len -= 4;
590 ch = packet + 4; 666 ch = packet + 4;
@@ -596,7 +672,7 @@ static int ssu100_process_packet(struct tty_struct *tty,
596 672
597 if (port->port.console && port->sysrq) { 673 if (port->port.console && port->sysrq) {
598 for (i = 0; i < len; i++, ch++) { 674 for (i = 0; i < len; i++, ch++) {
599 if (!usb_serial_handle_sysrq_char(tty, port, *ch)) 675 if (!usb_serial_handle_sysrq_char(port, *ch))
600 tty_insert_flip_char(tty, *ch, flag); 676 tty_insert_flip_char(tty, *ch, flag);
601 } 677 }
602 } else 678 } else
@@ -631,7 +707,6 @@ static void ssu100_process_read_urb(struct urb *urb)
631 tty_kref_put(tty); 707 tty_kref_put(tty);
632} 708}
633 709
634
635static struct usb_serial_driver ssu100_device = { 710static struct usb_serial_driver ssu100_device = {
636 .driver = { 711 .driver = {
637 .owner = THIS_MODULE, 712 .owner = THIS_MODULE,
@@ -653,6 +728,7 @@ static struct usb_serial_driver ssu100_device = {
653 .tiocmset = ssu100_tiocmset, 728 .tiocmset = ssu100_tiocmset,
654 .ioctl = ssu100_ioctl, 729 .ioctl = ssu100_ioctl,
655 .set_termios = ssu100_set_termios, 730 .set_termios = ssu100_set_termios,
731 .disconnect = usb_serial_generic_disconnect,
656}; 732};
657 733
658static int __init ssu100_init(void) 734static int __init ssu100_init(void)
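
Note (not part of the patch): the ssu100 hunks above replace the open-coded TIOCMIWAIT loop, which slept in interruptible_sleep_on() and only compared shadowMSR, with wait_modem_info() driven by per-port async_icount counters updated from ssu100_update_msr()/ssu100_update_lsr(), and add a TIOCGICOUNT case. From user space the two ioctls can be exercised as in the minimal sketch below; /dev/ttyUSB0 is only an example device node.

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/serial.h>	/* struct serial_icounter_struct */

	int main(void)
	{
		struct serial_icounter_struct ic;
		int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* Blocks until one of the requested modem lines changes;
		 * the driver wakes this up via delta_msr_wait. */
		if (ioctl(fd, TIOCMIWAIT, TIOCM_CTS | TIOCM_DSR) < 0)
			perror("TIOCMIWAIT");

		/* Dump the counters maintained by the update helpers. */
		if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
			printf("cts=%d dsr=%d rng=%d dcd=%d frame=%d parity=%d overrun=%d brk=%d\n",
			       ic.cts, ic.dsr, ic.rng, ic.dcd,
			       ic.frame, ic.parity, ic.overrun, ic.brk);

		close(fd);
		return 0;
	}
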
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 2a982e62963b..7a2177c79bde 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -736,6 +736,7 @@ int usb_serial_probe(struct usb_interface *interface,
736 736
737 serial = create_serial(dev, interface, type); 737 serial = create_serial(dev, interface, type);
738 if (!serial) { 738 if (!serial) {
739 module_put(type->driver.owner);
739 dev_err(&interface->dev, "%s - out of memory\n", __func__); 740 dev_err(&interface->dev, "%s - out of memory\n", __func__);
740 return -ENOMEM; 741 return -ENOMEM;
741 } 742 }
@@ -746,11 +747,11 @@ int usb_serial_probe(struct usb_interface *interface,
746 747
747 id = get_iface_id(type, interface); 748 id = get_iface_id(type, interface);
748 retval = type->probe(serial, id); 749 retval = type->probe(serial, id);
749 module_put(type->driver.owner);
750 750
751 if (retval) { 751 if (retval) {
752 dbg("sub driver rejected device"); 752 dbg("sub driver rejected device");
753 kfree(serial); 753 kfree(serial);
754 module_put(type->driver.owner);
754 return retval; 755 return retval;
755 } 756 }
756 } 757 }
@@ -822,6 +823,7 @@ int usb_serial_probe(struct usb_interface *interface,
822 if (num_bulk_in == 0 || num_bulk_out == 0) { 823 if (num_bulk_in == 0 || num_bulk_out == 0) {
823 dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); 824 dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
824 kfree(serial); 825 kfree(serial);
826 module_put(type->driver.owner);
825 return -ENODEV; 827 return -ENODEV;
826 } 828 }
827 } 829 }
@@ -835,22 +837,15 @@ int usb_serial_probe(struct usb_interface *interface,
835 dev_err(&interface->dev, 837 dev_err(&interface->dev,
836 "Generic device with no bulk out, not allowed.\n"); 838 "Generic device with no bulk out, not allowed.\n");
837 kfree(serial); 839 kfree(serial);
840 module_put(type->driver.owner);
838 return -EIO; 841 return -EIO;
839 } 842 }
840 } 843 }
841#endif 844#endif
842 if (!num_ports) { 845 if (!num_ports) {
843 /* if this device type has a calc_num_ports function, call it */ 846 /* if this device type has a calc_num_ports function, call it */
844 if (type->calc_num_ports) { 847 if (type->calc_num_ports)
845 if (!try_module_get(type->driver.owner)) {
846 dev_err(&interface->dev,
847 "module get failed, exiting\n");
848 kfree(serial);
849 return -EIO;
850 }
851 num_ports = type->calc_num_ports(serial); 848 num_ports = type->calc_num_ports(serial);
852 module_put(type->driver.owner);
853 }
854 if (!num_ports) 849 if (!num_ports)
855 num_ports = type->num_ports; 850 num_ports = type->num_ports;
856 } 851 }
@@ -1039,13 +1034,7 @@ int usb_serial_probe(struct usb_interface *interface,
1039 1034
1040 /* if this device type has an attach function, call it */ 1035 /* if this device type has an attach function, call it */
1041 if (type->attach) { 1036 if (type->attach) {
1042 if (!try_module_get(type->driver.owner)) {
1043 dev_err(&interface->dev,
1044 "module get failed, exiting\n");
1045 goto probe_error;
1046 }
1047 retval = type->attach(serial); 1037 retval = type->attach(serial);
1048 module_put(type->driver.owner);
1049 if (retval < 0) 1038 if (retval < 0)
1050 goto probe_error; 1039 goto probe_error;
1051 serial->attached = 1; 1040 serial->attached = 1;
@@ -1088,10 +1077,12 @@ int usb_serial_probe(struct usb_interface *interface,
1088exit: 1077exit:
1089 /* success */ 1078 /* success */
1090 usb_set_intfdata(interface, serial); 1079 usb_set_intfdata(interface, serial);
1080 module_put(type->driver.owner);
1091 return 0; 1081 return 0;
1092 1082
1093probe_error: 1083probe_error:
1094 usb_serial_put(serial); 1084 usb_serial_put(serial);
1085 module_put(type->driver.owner);
1095 return -EIO; 1086 return -EIO;
1096} 1087}
1097EXPORT_SYMBOL_GPL(usb_serial_probe); 1088EXPORT_SYMBOL_GPL(usb_serial_probe);
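
Note (not part of the patch): the usb-serial.c hunks change module reference handling in usb_serial_probe(). Instead of taking and dropping the sub-driver's reference separately around the probe, calc_num_ports and attach callbacks, one reference is now held for the whole probe and released exactly once on every exit path, including the early-error returns that previously leaked it. The sketch below shows the general shape of that pattern; do_probe_steps() is a placeholder, not a symbol from the patch.

	#include <linux/module.h>
	#include <linux/usb.h>
	#include <linux/usb/serial.h>

	/* Placeholder for the real endpoint/port setup the patch covers. */
	static int do_probe_steps(struct usb_serial_driver *type,
				  struct usb_interface *intf)
	{
		return 0;
	}

	/* Sketch only: one try_module_get() up front, one module_put() on
	 * every way out, so the sub-driver cannot be unloaded mid-probe. */
	static int probe_sketch(struct usb_serial_driver *type,
				struct usb_interface *interface)
	{
		int retval;

		if (!try_module_get(type->driver.owner))
			return -ENODEV;		/* driver is going away */

		retval = do_probe_steps(type, interface);

		module_put(type->driver.owner);
		return retval;
	}
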
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 29e850a7a2f9..7c8008225ee3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -243,7 +243,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
243 int r, nlogs = 0; 243 int r, nlogs = 0;
244 244
245 while (datalen > 0) { 245 while (datalen > 0) {
246 if (unlikely(headcount >= VHOST_NET_MAX_SG)) { 246 if (unlikely(seg >= VHOST_NET_MAX_SG)) {
247 r = -ENOBUFS; 247 r = -ENOBUFS;
248 goto err; 248 goto err;
249 } 249 }
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e05557d52999..dd3d6f7406f8 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
60 return 0; 60 return 0;
61} 61}
62 62
63static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
64{
65 INIT_LIST_HEAD(&work->node);
66 work->fn = fn;
67 init_waitqueue_head(&work->done);
68 work->flushing = 0;
69 work->queue_seq = work->done_seq = 0;
70}
71
63/* Init poll structure */ 72/* Init poll structure */
64void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, 73void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
65 unsigned long mask, struct vhost_dev *dev) 74 unsigned long mask, struct vhost_dev *dev)
66{ 75{
67 struct vhost_work *work = &poll->work;
68
69 init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); 76 init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
70 init_poll_funcptr(&poll->table, vhost_poll_func); 77 init_poll_funcptr(&poll->table, vhost_poll_func);
71 poll->mask = mask; 78 poll->mask = mask;
72 poll->dev = dev; 79 poll->dev = dev;
73 80
74 INIT_LIST_HEAD(&work->node); 81 vhost_work_init(&poll->work, fn);
75 work->fn = fn;
76 init_waitqueue_head(&work->done);
77 work->flushing = 0;
78 work->queue_seq = work->done_seq = 0;
79} 82}
80 83
81/* Start polling a file. We add ourselves to file's wait queue. The caller must 84/* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
95 remove_wait_queue(poll->wqh, &poll->wait); 98 remove_wait_queue(poll->wqh, &poll->wait);
96} 99}
97 100
98/* Flush any work that has been scheduled. When calling this, don't hold any 101static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
99 * locks that are also used by the callback. */
100void vhost_poll_flush(struct vhost_poll *poll)
101{ 102{
102 struct vhost_work *work = &poll->work;
103 unsigned seq; 103 unsigned seq;
104 int left; 104 int left;
105 int flushing; 105 int flushing;
106 106
107 spin_lock_irq(&poll->dev->work_lock); 107 spin_lock_irq(&dev->work_lock);
108 seq = work->queue_seq; 108 seq = work->queue_seq;
109 work->flushing++; 109 work->flushing++;
110 spin_unlock_irq(&poll->dev->work_lock); 110 spin_unlock_irq(&dev->work_lock);
111 wait_event(work->done, ({ 111 wait_event(work->done, ({
112 spin_lock_irq(&poll->dev->work_lock); 112 spin_lock_irq(&dev->work_lock);
113 left = seq - work->done_seq <= 0; 113 left = seq - work->done_seq <= 0;
114 spin_unlock_irq(&poll->dev->work_lock); 114 spin_unlock_irq(&dev->work_lock);
115 left; 115 left;
116 })); 116 }));
117 spin_lock_irq(&poll->dev->work_lock); 117 spin_lock_irq(&dev->work_lock);
118 flushing = --work->flushing; 118 flushing = --work->flushing;
119 spin_unlock_irq(&poll->dev->work_lock); 119 spin_unlock_irq(&dev->work_lock);
120 BUG_ON(flushing < 0); 120 BUG_ON(flushing < 0);
121} 121}
122 122
123void vhost_poll_queue(struct vhost_poll *poll) 123/* Flush any work that has been scheduled. When calling this, don't hold any
124 * locks that are also used by the callback. */
125void vhost_poll_flush(struct vhost_poll *poll)
126{
127 vhost_work_flush(poll->dev, &poll->work);
128}
129
130static inline void vhost_work_queue(struct vhost_dev *dev,
131 struct vhost_work *work)
124{ 132{
125 struct vhost_dev *dev = poll->dev;
126 struct vhost_work *work = &poll->work;
127 unsigned long flags; 133 unsigned long flags;
128 134
129 spin_lock_irqsave(&dev->work_lock, flags); 135 spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
135 spin_unlock_irqrestore(&dev->work_lock, flags); 141 spin_unlock_irqrestore(&dev->work_lock, flags);
136} 142}
137 143
144void vhost_poll_queue(struct vhost_poll *poll)
145{
146 vhost_work_queue(poll->dev, &poll->work);
147}
148
138static void vhost_vq_reset(struct vhost_dev *dev, 149static void vhost_vq_reset(struct vhost_dev *dev,
139 struct vhost_virtqueue *vq) 150 struct vhost_virtqueue *vq)
140{ 151{
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
236 return dev->mm == current->mm ? 0 : -EPERM; 247 return dev->mm == current->mm ? 0 : -EPERM;
237} 248}
238 249
250struct vhost_attach_cgroups_struct {
251 struct vhost_work work;
252 struct task_struct *owner;
253 int ret;
254};
255
256static void vhost_attach_cgroups_work(struct vhost_work *work)
257{
258 struct vhost_attach_cgroups_struct *s;
259 s = container_of(work, struct vhost_attach_cgroups_struct, work);
260 s->ret = cgroup_attach_task_all(s->owner, current);
261}
262
263static int vhost_attach_cgroups(struct vhost_dev *dev)
264{
265 struct vhost_attach_cgroups_struct attach;
266 attach.owner = current;
267 vhost_work_init(&attach.work, vhost_attach_cgroups_work);
268 vhost_work_queue(dev, &attach.work);
269 vhost_work_flush(dev, &attach.work);
270 return attach.ret;
271}
272
239/* Caller should have device mutex */ 273/* Caller should have device mutex */
240static long vhost_dev_set_owner(struct vhost_dev *dev) 274static long vhost_dev_set_owner(struct vhost_dev *dev)
241{ 275{
@@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
255 } 289 }
256 290
257 dev->worker = worker; 291 dev->worker = worker;
258 err = cgroup_attach_task_current_cg(worker); 292 wake_up_process(worker); /* avoid contributing to loadavg */
293
294 err = vhost_attach_cgroups(dev);
259 if (err) 295 if (err)
260 goto err_cgroup; 296 goto err_cgroup;
261 wake_up_process(worker); /* avoid contributing to loadavg */
262 297
263 return 0; 298 return 0;
264err_cgroup: 299err_cgroup:
265 kthread_stop(worker); 300 kthread_stop(worker);
301 dev->worker = NULL;
266err_worker: 302err_worker:
267 if (dev->mm) 303 if (dev->mm)
268 mmput(dev->mm); 304 mmput(dev->mm);
@@ -323,7 +359,10 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
323 dev->mm = NULL; 359 dev->mm = NULL;
324 360
325 WARN_ON(!list_empty(&dev->work_list)); 361 WARN_ON(!list_empty(&dev->work_list));
326 kthread_stop(dev->worker); 362 if (dev->worker) {
363 kthread_stop(dev->worker);
364 dev->worker = NULL;
365 }
327} 366}
328 367
329static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) 368static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
@@ -819,11 +858,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
819 if (r < 0) 858 if (r < 0)
820 return r; 859 return r;
821 len -= l; 860 len -= l;
822 if (!len) 861 if (!len) {
862 if (vq->log_ctx)
863 eventfd_signal(vq->log_ctx, 1);
823 return 0; 864 return 0;
865 }
824 } 866 }
825 if (vq->log_ctx)
826 eventfd_signal(vq->log_ctx, 1);
827 /* Length written exceeds what we have stored. This is a bug. */ 867 /* Length written exceeds what we have stored. This is a bug. */
828 BUG(); 868 BUG();
829 return 0; 869 return 0;
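
Note (not part of the patch): the vhost hunks factor the poll machinery into generic vhost_work_init()/vhost_work_queue()/vhost_work_flush() helpers so that arbitrary work, not just poll callbacks, can be run on the device's worker kthread. vhost_attach_cgroups() uses this to queue cgroup_attach_task_all() onto the freshly woken worker and then flushes to read back the result, and the error path now clears dev->worker so cleanup does not stop a non-existent thread. The user-space analogy below (plain pthreads, nothing from the kernel) illustrates the same queue-then-flush handshake.

	#include <pthread.h>
	#include <stdio.h>

	/* Analogy only: a worker thread runs queued items; flushing blocks
	 * until a given item has completed, like vhost_work_flush(). */
	struct work {
		int (*fn)(void);
		int ret;
		int done;
	};

	static struct work *pending;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

	static void work_queue(struct work *w)	/* cf. vhost_work_queue() */
	{
		pthread_mutex_lock(&lock);
		pending = w;
		pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);
	}

	static void work_flush(struct work *w)	/* cf. vhost_work_flush() */
	{
		pthread_mutex_lock(&lock);
		while (!w->done)
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
	}

	static void *worker(void *arg)
	{
		struct work *w;

		(void)arg;
		pthread_mutex_lock(&lock);
		while (!pending)
			pthread_cond_wait(&cond, &lock);
		w = pending;
		pending = NULL;
		pthread_mutex_unlock(&lock);

		w->ret = w->fn();	/* runs in the worker's context */

		pthread_mutex_lock(&lock);
		w->done = 1;
		pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	static int in_worker(void)
	{
		return 42;	/* stand-in for cgroup_attach_task_all() */
	}

	int main(void)
	{
		pthread_t tid;
		struct work w = { .fn = in_worker };

		pthread_create(&tid, NULL, worker, NULL);
		work_queue(&w);
		work_flush(&w);			/* wait for completion */
		printf("worker returned %d\n", w.ret);
		pthread_join(tid, NULL);
		return 0;
	}
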
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 84f842331dfa..7ccc967831f0 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3508,7 +3508,7 @@ static void fbcon_exit(void)
3508 softback_buf = 0UL; 3508 softback_buf = 0UL;
3509 3509
3510 for (i = 0; i < FB_MAX; i++) { 3510 for (i = 0; i < FB_MAX; i++) {
3511 int pending; 3511 int pending = 0;
3512 3512
3513 mapped = 0; 3513 mapped = 0;
3514 info = registered_fb[i]; 3514 info = registered_fb[i];
@@ -3516,7 +3516,8 @@ static void fbcon_exit(void)
3516 if (info == NULL) 3516 if (info == NULL)
3517 continue; 3517 continue;
3518 3518
3519 pending = cancel_work_sync(&info->queue); 3519 if (info->queue.func)
3520 pending = cancel_work_sync(&info->queue);
3520 DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" : 3521 DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
3521 "no")); 3522 "no"));
3522 3523
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 815f84b07933..70477c2e4b61 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -13,7 +13,7 @@
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/screen_info.h> 14#include <linux/screen_info.h>
15#include <linux/dmi.h> 15#include <linux/dmi.h>
16 16#include <linux/pci.h>
17#include <video/vga.h> 17#include <video/vga.h>
18 18
19static struct fb_var_screeninfo efifb_defined __devinitdata = { 19static struct fb_var_screeninfo efifb_defined __devinitdata = {
@@ -39,17 +39,31 @@ enum {
39 M_I20, /* 20-Inch iMac */ 39 M_I20, /* 20-Inch iMac */
40 M_I20_SR, /* 20-Inch iMac (Santa Rosa) */ 40 M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
41 M_I24, /* 24-Inch iMac */ 41 M_I24, /* 24-Inch iMac */
42 M_I24_8_1, /* 24-Inch iMac, 8,1th gen */
43 M_I24_10_1, /* 24-Inch iMac, 10,1th gen */
44 M_I27_11_1, /* 27-Inch iMac, 11,1th gen */
42 M_MINI, /* Mac Mini */ 45 M_MINI, /* Mac Mini */
46 M_MINI_3_1, /* Mac Mini, 3,1th gen */
47 M_MINI_4_1, /* Mac Mini, 4,1th gen */
43 M_MB, /* MacBook */ 48 M_MB, /* MacBook */
44 M_MB_2, /* MacBook, 2nd rev. */ 49 M_MB_2, /* MacBook, 2nd rev. */
45 M_MB_3, /* MacBook, 3rd rev. */ 50 M_MB_3, /* MacBook, 3rd rev. */
51 M_MB_5_1, /* MacBook, 5th rev. */
52 M_MB_6_1, /* MacBook, 6th rev. */
53 M_MB_7_1, /* MacBook, 7th rev. */
46 M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */ 54 M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */
47 M_MBA, /* MacBook Air */ 55 M_MBA, /* MacBook Air */
48 M_MBP, /* MacBook Pro */ 56 M_MBP, /* MacBook Pro */
49 M_MBP_2, /* MacBook Pro 2nd gen */ 57 M_MBP_2, /* MacBook Pro 2nd gen */
58 M_MBP_2_2, /* MacBook Pro 2,2nd gen */
50 M_MBP_SR, /* MacBook Pro (Santa Rosa) */ 59 M_MBP_SR, /* MacBook Pro (Santa Rosa) */
51 M_MBP_4, /* MacBook Pro, 4th gen */ 60 M_MBP_4, /* MacBook Pro, 4th gen */
52 M_MBP_5_1, /* MacBook Pro, 5,1th gen */ 61 M_MBP_5_1, /* MacBook Pro, 5,1th gen */
62 M_MBP_5_2, /* MacBook Pro, 5,2th gen */
63 M_MBP_5_3, /* MacBook Pro, 5,3rd gen */
64 M_MBP_6_1, /* MacBook Pro, 6,1th gen */
65 M_MBP_6_2, /* MacBook Pro, 6,2th gen */
66 M_MBP_7_1, /* MacBook Pro, 7,1th gen */
53 M_UNKNOWN /* placeholder */ 67 M_UNKNOWN /* placeholder */
54}; 68};
55 69
@@ -64,14 +78,28 @@ static struct efifb_dmi_info {
64 [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */ 78 [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */
65 [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 }, 79 [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 },
66 [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */ 80 [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */
81 [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 },
82 [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 },
83 [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 },
67 [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 }, 84 [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 },
85 [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 },
86 [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 },
68 [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 }, 87 [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 },
88 [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 },
89 [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 },
90 [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 },
69 [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 }, 91 [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 },
70 [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 }, 92 [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 },
71 [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ 93 [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
94 [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 },
72 [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, 95 [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
73 [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, 96 [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
74 [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 }, 97 [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
98 [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 },
99 [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 },
100 [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 },
101 [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 },
102 [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 },
75 [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } 103 [M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
76}; 104};
77 105
@@ -92,7 +120,12 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
92 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24), 120 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
93 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24), 121 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
94 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR), 122 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
123 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
124 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
125 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
95 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI), 126 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
127 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
128 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
96 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB), 129 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
97 /* At least one of these two will be right; maybe both? */ 130 /* At least one of these two will be right; maybe both? */
98 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB), 131 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
@@ -101,14 +134,23 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
101 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB), 134 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
102 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB), 135 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
103 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB), 136 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
137 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
138 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
139 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
104 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA), 140 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
105 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP), 141 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
106 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2), 142 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
143 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
107 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2), 144 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
108 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), 145 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
109 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), 146 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
110 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), 147 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
111 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1), 148 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
149 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
150 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
151 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
152 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
153 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
112 {}, 154 {},
113}; 155};
114 156
@@ -116,7 +158,7 @@ static int set_system(const struct dmi_system_id *id)
116{ 158{
117 struct efifb_dmi_info *info = id->driver_data; 159 struct efifb_dmi_info *info = id->driver_data;
118 if (info->base == 0) 160 if (info->base == 0)
119 return -ENODEV; 161 return 0;
120 162
121 printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p " 163 printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p "
122 "(%dx%d, stride %d)\n", id->ident, 164 "(%dx%d, stride %d)\n", id->ident,
@@ -124,18 +166,55 @@ static int set_system(const struct dmi_system_id *id)
124 info->stride); 166 info->stride);
125 167
126 /* Trust the bootloader over the DMI tables */ 168 /* Trust the bootloader over the DMI tables */
127 if (screen_info.lfb_base == 0) 169 if (screen_info.lfb_base == 0) {
170#if defined(CONFIG_PCI)
171 struct pci_dev *dev = NULL;
172 int found_bar = 0;
173#endif
128 screen_info.lfb_base = info->base; 174 screen_info.lfb_base = info->base;
129 if (screen_info.lfb_linelength == 0)
130 screen_info.lfb_linelength = info->stride;
131 if (screen_info.lfb_width == 0)
132 screen_info.lfb_width = info->width;
133 if (screen_info.lfb_height == 0)
134 screen_info.lfb_height = info->height;
135 if (screen_info.orig_video_isVGA == 0)
136 screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
137 175
138 return 0; 176#if defined(CONFIG_PCI)
177 /* make sure that the address in the table is actually on a
178 * VGA device's PCI BAR */
179
180 for_each_pci_dev(dev) {
181 int i;
182 if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
183 continue;
184 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
185 resource_size_t start, end;
186
187 start = pci_resource_start(dev, i);
188 if (start == 0)
189 break;
190 end = pci_resource_end(dev, i);
191 if (screen_info.lfb_base >= start &&
192 screen_info.lfb_base < end) {
193 found_bar = 1;
194 }
195 }
196 }
197 if (!found_bar)
198 screen_info.lfb_base = 0;
199#endif
200 }
201 if (screen_info.lfb_base) {
202 if (screen_info.lfb_linelength == 0)
203 screen_info.lfb_linelength = info->stride;
204 if (screen_info.lfb_width == 0)
205 screen_info.lfb_width = info->width;
206 if (screen_info.lfb_height == 0)
207 screen_info.lfb_height = info->height;
208 if (screen_info.orig_video_isVGA == 0)
209 screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
210 } else {
211 screen_info.lfb_linelength = 0;
212 screen_info.lfb_width = 0;
213 screen_info.lfb_height = 0;
214 screen_info.orig_video_isVGA = 0;
215 return 0;
216 }
217 return 1;
139} 218}
140 219
141static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, 220static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index c91a7f70f7b0..a31a77ff6f3d 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -298,8 +298,8 @@ static void set_dma_control0(struct pxa168fb_info *fbi)
298 * Set bit to enable graphics DMA. 298 * Set bit to enable graphics DMA.
299 */ 299 */
300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); 300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
301 x |= fbi->active ? 0x00000100 : 0; 301 x &= ~CFG_GRA_ENA_MASK;
302 fbi->active = 0; 302 x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);
303 303
304 /* 304 /*
305 * If we are in a pseudo-color mode, we need to enable 305 * If we are in a pseudo-color mode, we need to enable
@@ -559,7 +559,7 @@ static struct fb_ops pxa168fb_ops = {
559 .fb_imageblit = cfb_imageblit, 559 .fb_imageblit = cfb_imageblit,
560}; 560};
561 561
562static int __init pxa168fb_init_mode(struct fb_info *info, 562static int __devinit pxa168fb_init_mode(struct fb_info *info,
563 struct pxa168fb_mach_info *mi) 563 struct pxa168fb_mach_info *mi)
564{ 564{
565 struct pxa168fb_info *fbi = info->par; 565 struct pxa168fb_info *fbi = info->par;
@@ -599,7 +599,7 @@ static int __init pxa168fb_init_mode(struct fb_info *info,
599 return ret; 599 return ret;
600} 600}
601 601
602static int __init pxa168fb_probe(struct platform_device *pdev) 602static int __devinit pxa168fb_probe(struct platform_device *pdev)
603{ 603{
604 struct pxa168fb_mach_info *mi; 604 struct pxa168fb_mach_info *mi;
605 struct fb_info *info = 0; 605 struct fb_info *info = 0;
@@ -792,7 +792,7 @@ static struct platform_driver pxa168fb_driver = {
792 .probe = pxa168fb_probe, 792 .probe = pxa168fb_probe,
793}; 793};
794 794
795static int __devinit pxa168fb_init(void) 795static int __init pxa168fb_init(void)
796{ 796{
797 return platform_driver_register(&pxa168fb_driver); 797 return platform_driver_register(&pxa168fb_driver);
798} 798}
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 559bf1727a2b..b52f8e4ef1fd 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1701,6 +1701,9 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
1701 break; 1701 break;
1702 1702
1703 case FBIOGET_VBLANK: 1703 case FBIOGET_VBLANK:
1704
1705 memset(&sisvbblank, 0, sizeof(struct fb_vblank));
1706
1704 sisvbblank.count = 0; 1707 sisvbblank.count = 0;
1705 sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount); 1708 sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);
1706 1709
diff --git a/drivers/video/via/ioctl.c b/drivers/video/via/ioctl.c
index da03c074e32a..4d553d0b8d7a 100644
--- a/drivers/video/via/ioctl.c
+++ b/drivers/video/via/ioctl.c
@@ -25,6 +25,8 @@ int viafb_ioctl_get_viafb_info(u_long arg)
25{ 25{
26 struct viafb_ioctl_info viainfo; 26 struct viafb_ioctl_info viainfo;
27 27
28 memset(&viainfo, 0, sizeof(struct viafb_ioctl_info));
29
28 viainfo.viafb_id = VIAID; 30 viainfo.viafb_id = VIAID;
29 viainfo.vendor_id = PCI_VIA_VENDOR_ID; 31 viainfo.vendor_id = PCI_VIA_VENDOR_ID;
30 32
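
Note (not part of the patch): the sisfb FBIOGET_VBLANK hunk above and this viafb hunk apply the same fix. The ioctl reply lives on the kernel stack, so it must be zeroed before the handler fills in the fields it knows about; otherwise the eventual copy_to_user() hands uninitialised stack bytes to user space. A generic sketch of the pattern, with placeholder names, follows.

	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/uaccess.h>

	/* Placeholder reply structure; reserved[] would leak stack data
	 * to user space if the memset below were missing. */
	struct example_info {
		unsigned int version;
		unsigned int reserved[4];
	};

	static int example_get_info(void __user *argp)
	{
		struct example_info info;

		memset(&info, 0, sizeof(info));	/* clear padding and unset fields */
		info.version = 1;		/* fill in only what we know */

		if (copy_to_user(argp, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
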
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b036677df8c4..24efd8ea41bb 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -213,11 +213,11 @@ config OMAP_WATCHDOG
213 here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer. 213 here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
214 214
215config PNX4008_WATCHDOG 215config PNX4008_WATCHDOG
216 tristate "PNX4008 Watchdog" 216 tristate "PNX4008 and LPC32XX Watchdog"
217 depends on ARCH_PNX4008 217 depends on ARCH_PNX4008 || ARCH_LPC32XX
218 help 218 help
219 Say Y here if to include support for the watchdog timer 219 Say Y here if to include support for the watchdog timer
220 in the PNX4008 processor. 220 in the PNX4008 or LPC32XX processor.
221 This driver can be built as a module by choosing M. The module 221 This driver can be built as a module by choosing M. The module
222 will be called pnx4008_wdt. 222 will be called pnx4008_wdt.
223 223
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 88c83aa57303..f31493e65b38 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -305,7 +305,7 @@ static int __init sbwdog_init(void)
305 if (ret) { 305 if (ret) {
306 printk(KERN_ERR "%s: failed to request irq 1 - %d\n", 306 printk(KERN_ERR "%s: failed to request irq 1 - %d\n",
307 ident.identity, ret); 307 ident.identity, ret);
308 return ret; 308 goto out;
309 } 309 }
310 310
311 ret = misc_register(&sbwdog_miscdev); 311 ret = misc_register(&sbwdog_miscdev);
@@ -313,14 +313,20 @@ static int __init sbwdog_init(void)
313 printk(KERN_INFO "%s: timeout is %ld.%ld secs\n", 313 printk(KERN_INFO "%s: timeout is %ld.%ld secs\n",
314 ident.identity, 314 ident.identity,
315 timeout / 1000000, (timeout / 100000) % 10); 315 timeout / 1000000, (timeout / 100000) % 10);
316 } else 316 return 0;
317 free_irq(1, (void *)user_dog); 317 }
318 free_irq(1, (void *)user_dog);
319out:
320 unregister_reboot_notifier(&sbwdog_notifier);
321
318 return ret; 322 return ret;
319} 323}
320 324
321static void __exit sbwdog_exit(void) 325static void __exit sbwdog_exit(void)
322{ 326{
323 misc_deregister(&sbwdog_miscdev); 327 misc_deregister(&sbwdog_miscdev);
328 free_irq(1, (void *)user_dog);
329 unregister_reboot_notifier(&sbwdog_notifier);
324} 330}
325 331
326module_init(sbwdog_init); 332module_init(sbwdog_init);
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 458c499c1223..18cdeb4c4258 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -449,6 +449,9 @@ static __devinit int ts72xx_wdt_probe(struct platform_device *pdev)
449 wdt->pdev = pdev; 449 wdt->pdev = pdev;
450 mutex_init(&wdt->lock); 450 mutex_init(&wdt->lock);
451 451
452 /* make sure that the watchdog is disabled */
453 ts72xx_wdt_stop(wdt);
454
452 error = misc_register(&ts72xx_wdt_miscdev); 455 error = misc_register(&ts72xx_wdt_miscdev);
453 if (error) { 456 if (error) {
454 dev_err(&pdev->dev, "failed to register miscdev\n"); 457 dev_err(&pdev->dev, "failed to register miscdev\n");
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 72f91bff29c7..13365ba35218 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -112,6 +112,7 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
112#define VALID_EVTCHN(chn) ((chn) != 0) 112#define VALID_EVTCHN(chn) ((chn) != 0)
113 113
114static struct irq_chip xen_dynamic_chip; 114static struct irq_chip xen_dynamic_chip;
115static struct irq_chip xen_percpu_chip;
115 116
116/* Constructor for packed IRQ information. */ 117/* Constructor for packed IRQ information. */
117static struct irq_info mk_unbound_info(void) 118static struct irq_info mk_unbound_info(void)
@@ -377,7 +378,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
377 irq = find_unbound_irq(); 378 irq = find_unbound_irq();
378 379
379 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, 380 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
380 handle_level_irq, "event"); 381 handle_edge_irq, "event");
381 382
382 evtchn_to_irq[evtchn] = irq; 383 evtchn_to_irq[evtchn] = irq;
383 irq_info[irq] = mk_evtchn_info(evtchn); 384 irq_info[irq] = mk_evtchn_info(evtchn);
@@ -403,8 +404,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
403 if (irq < 0) 404 if (irq < 0)
404 goto out; 405 goto out;
405 406
406 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, 407 set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
407 handle_level_irq, "ipi"); 408 handle_percpu_irq, "ipi");
408 409
409 bind_ipi.vcpu = cpu; 410 bind_ipi.vcpu = cpu;
410 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, 411 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
@@ -444,8 +445,8 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
444 445
445 irq = find_unbound_irq(); 446 irq = find_unbound_irq();
446 447
447 set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, 448 set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
448 handle_level_irq, "virq"); 449 handle_percpu_irq, "virq");
449 450
450 evtchn_to_irq[evtchn] = irq; 451 evtchn_to_irq[evtchn] = irq;
451 irq_info[irq] = mk_virq_info(evtchn, virq); 452 irq_info[irq] = mk_virq_info(evtchn, virq);
@@ -964,6 +965,16 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
964 .retrigger = retrigger_dynirq, 965 .retrigger = retrigger_dynirq,
965}; 966};
966 967
968static struct irq_chip xen_percpu_chip __read_mostly = {
969 .name = "xen-percpu",
970
971 .disable = disable_dynirq,
972 .mask = disable_dynirq,
973 .unmask = enable_dynirq,
974
975 .ack = ack_dynirq,
976};
977
967int xen_set_callback_via(uint64_t via) 978int xen_set_callback_via(uint64_t via)
968{ 979{
969 struct xen_hvm_param a; 980 struct xen_hvm_param a;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 1799bd890315..ef9c7db52077 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -237,7 +237,7 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
237 goto again; 237 goto again;
238 238
239 if (sysrq_key != '\0') 239 if (sysrq_key != '\0')
240 handle_sysrq(sysrq_key, NULL); 240 handle_sysrq(sysrq_key);
241} 241}
242 242
243static struct xenbus_watch sysrq_watch = { 243static struct xenbus_watch sysrq_watch = {
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 29bac5118877..d409495876f1 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -755,7 +755,10 @@ int register_xenstore_notifier(struct notifier_block *nb)
755{ 755{
756 int ret = 0; 756 int ret = 0;
757 757
758 blocking_notifier_chain_register(&xenstore_chain, nb); 758 if (xenstored_ready > 0)
759 ret = nb->notifier_call(nb, 0, NULL);
760 else
761 blocking_notifier_chain_register(&xenstore_chain, nb);
759 762
760 return ret; 763 return ret;
761} 764}
@@ -769,7 +772,7 @@ EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
769 772
770void xenbus_probe(struct work_struct *unused) 773void xenbus_probe(struct work_struct *unused)
771{ 774{
772 BUG_ON((xenstored_ready <= 0)); 775 xenstored_ready = 1;
773 776
774 /* Enumerate devices in xenstore and watch for changes. */ 777 /* Enumerate devices in xenstore and watch for changes. */
775 xenbus_probe_devices(&xenbus_frontend); 778 xenbus_probe_devices(&xenbus_frontend);
@@ -835,8 +838,8 @@ static int __init xenbus_init(void)
835 xen_store_evtchn = xen_start_info->store_evtchn; 838 xen_store_evtchn = xen_start_info->store_evtchn;
836 xen_store_mfn = xen_start_info->store_mfn; 839 xen_store_mfn = xen_start_info->store_mfn;
837 xen_store_interface = mfn_to_virt(xen_store_mfn); 840 xen_store_interface = mfn_to_virt(xen_store_mfn);
841 xenstored_ready = 1;
838 } 842 }
839 xenstored_ready = 1;
840 } 843 }
841 844
842 /* Initialize the interface to xenstore. */ 845 /* Initialize the interface to xenstore. */